diff --git a/.github/workflows/docs-pages.yaml b/.github/workflows/docs-pages.yaml new file mode 100644 index 00000000000..b40bb391062 --- /dev/null +++ b/.github/workflows/docs-pages.yaml @@ -0,0 +1,43 @@ +name: "Docs / Publish" +# For more information, +# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows + +on: + push: + branches: + - scylla-3.x + paths: + - 'docs/**' + - 'faq/**' + - 'manual/**' + - 'changelog/**' + - 'upgrade_guide/**' + workflow_dispatch: + +jobs: + release: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + persist-credentials: false + fetch-depth: 0 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: 3.7 + - name: Set up JDK 1.8 + uses: actions/setup-java@v1 + with: + java-version: 1.8 + - name: Set up env + run: make -C docs setupenv + - name: Build redirects + run: make -C docs redirects + - name: Build docs + run: make -C docs multiversion + - name: Deploy docs to GitHub Pages + run: ./docs/_utils/deploy.sh + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml new file mode 100644 index 00000000000..73f1e34c65b --- /dev/null +++ b/.github/workflows/docs-pr.yaml @@ -0,0 +1,36 @@ +name: "Docs / Build PR" +# For more information, +# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows + +on: + pull_request: + branches: + - scylla-3.x + paths: + - 'docs/**' + - 'faq/**' + - 'manual/**' + - 'changelog/**' + - 'upgrade_guide/**' + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + persist-credentials: false + fetch-depth: 0 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: 3.7 + - name: Set up JDK 1.8 + uses: actions/setup-java@v1 + with: + java-version: 1.8 + - name: Set up env + run: make -C docs setupenv + 
- name: Build docs + run: make -C docs test \ No newline at end of file diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml deleted file mode 100644 index 27ebb4ce1bf..00000000000 --- a/.github/workflows/pages.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: "CI Docs" - -on: - push: - branches: - - scylla-3.x - paths: - - 'docs/**' - - 'faq/**' - - 'manual/**' - - 'changelog/**' - - 'upgrade_guide/**' -jobs: - release: - name: Build - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - persist-credentials: false - fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v1 - with: - python-version: 3.7 - - name: Set up JDK 1.8 - uses: actions/setup-java@v1 - with: - java-version: 1.8 - - name: Build Sphinx docs - run: | - export PATH=$PATH:~/.local/bin - cd docs - make multiversion - - name: Deploy - run : ./docs/_utils/deploy.sh - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/tests-reports-4x@v1.yml b/.github/workflows/tests-reports-4x@v1.yml new file mode 100644 index 00000000000..02921c9a9cd --- /dev/null +++ b/.github/workflows/tests-reports-4x@v1.yml @@ -0,0 +1,24 @@ +# This is a workflow that runs after 'Tests' +# workflow, because the 'Tests' (PR) workflow +# has insufficient permissions to write +# GitHub Actions checks. 
+name: 'Tests Reports (Driver 4.x)' +on: + workflow_run: + workflows: ['Tests (Driver 4.x)'] + types: + - completed +jobs: + report: + runs-on: ubuntu-latest + timeout-minutes: 5 + + steps: + - name: Generate test report + uses: dorny/test-reporter@v1 + with: + artifact: 'test-results' + name: 'Test report' + path: '**/TEST-TestSuite.xml' + reporter: java-junit + list-tests: failed \ No newline at end of file diff --git a/.github/workflows/tests-reports@v1.yml b/.github/workflows/tests-reports@v1.yml new file mode 100644 index 00000000000..a6d1b3b5b9e --- /dev/null +++ b/.github/workflows/tests-reports@v1.yml @@ -0,0 +1,24 @@ +# This is a workflow that runs after 'Tests' +# workflow, because the 'Tests' (PR) workflow +# has insufficient permissions to write +# GitHub Actions checks. +name: 'Tests Reports' +on: + workflow_run: + workflows: ['Tests'] + types: + - completed +jobs: + report: + runs-on: ubuntu-latest + timeout-minutes: 5 + + steps: + - name: Generate test report + uses: dorny/test-reporter@v1 + with: + artifact: 'test-results' + name: 'Test report' + path: '**/TEST-TestSuite.xml' + reporter: java-junit + list-tests: failed diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml deleted file mode 100644 index 5baa4e9d96f..00000000000 --- a/.github/workflows/tests.yml +++ /dev/null @@ -1,85 +0,0 @@ -name: Tests - -on: - push: - branches: [ scylla-3.x ] - pull_request: - branches: [ scylla-3.x ] - -jobs: - run-unit-tests: - name: Run unit tests - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Set up JDK 8 - uses: actions/setup-java@v2 - with: - java-version: '8' - distribution: 'adopt' - - - name: Run unit tests - run: mvn -B test - - run-cassandra-integration-tests: - name: Run integration tests on Cassandra - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Set up JDK 8 - uses: actions/setup-java@v2 - with: - java-version: '8' - distribution: 'adopt' - - - name: Setup environment 
(Integration test on Cassandra 3.11.11) - run: | - sudo apt-get update - sudo apt-get install -y python3 python3-pip python-is-python3 python3-boto3 - sudo pip3 install https://github.com/scylladb/scylla-ccm/archive/master.zip - - - name: Run integration tests on Cassandra 3.11.11 - run: mvn -B verify -Pshort -Dcassandra.version=3.11.11 - - - uses: actions/upload-artifact@v2 - if: ${{ failure() }} - with: - name: ccm-logs-cassandra-3.11.11 - path: /tmp/*-0/ccm*/node*/logs/* - - run-scylla-integration-tests: - name: Run integration tests on Scylla - runs-on: ubuntu-latest - - strategy: - matrix: - scylla-version: ['4.4.4', '4.3.6'] - - steps: - - uses: actions/checkout@v2 - - - name: Set up JDK 8 - uses: actions/setup-java@v2 - with: - java-version: '8' - distribution: 'adopt' - - - name: Setup environment (Integration test on Scylla ${{ matrix.scylla-version }}) - run: | - sudo apt-get update - sudo apt-get install -y python3 python3-pip python-is-python3 python3-boto3 - sudo pip3 install https://github.com/scylladb/scylla-ccm/archive/master.zip - sudo sh -c "echo 2097152 >> /proc/sys/fs/aio-max-nr" - - - name: Run integration tests on Scylla (${{ matrix.scylla-version }}) - run: mvn -B verify -Pshort -Dscylla.version=${{ matrix.scylla-version }} - - - uses: actions/upload-artifact@v2 - if: ${{ failure() }} - with: - name: ccm-logs-scylla-${{ matrix.scylla-version }} - path: /tmp/*-0/ccm*/node*/logs/* \ No newline at end of file diff --git a/.github/workflows/tests@v1.yml b/.github/workflows/tests@v1.yml new file mode 100644 index 00000000000..80702698380 --- /dev/null +++ b/.github/workflows/tests@v1.yml @@ -0,0 +1,227 @@ +name: Tests + +on: + push: + branches: [ scylla-3.*x ] + pull_request: + branches: [ scylla-3.*x ] + workflow_dispatch: + +jobs: + build: + name: Build + runs-on: ubuntu-latest + timeout-minutes: 10 + + strategy: + matrix: + java-version: [8, 11] + fail-fast: false + + steps: + - name: Checkout source + uses: actions/checkout@v2 + + - name: Set 
up JDK ${{ matrix.java-version }} + uses: actions/setup-java@v2 + with: + java-version: ${{ matrix.java-version }} + distribution: 'adopt' + + - name: Compile source and tests + run: mvn -B compile test-compile -Dfmt.skip=true -Dclirr.skip=true -Danimal.sniffer.skip=true + + verify: + name: Full verify + runs-on: ubuntu-latest + timeout-minutes: 10 + + strategy: + matrix: + java-version: [8, 11] + fail-fast: false + + steps: + - name: Checkout source + uses: actions/checkout@v2 + + - name: Set up JDK ${{ matrix.java-version }} + uses: actions/setup-java@v2 + with: + java-version: ${{ matrix.java-version }} + distribution: 'adopt' + + - name: Full verify + run: mvn -B verify -DskipTests + + unit-tests: + name: Unit tests + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Checkout source + uses: actions/checkout@v2 + + - name: Set up JDK 8 + uses: actions/setup-java@v2 + with: + java-version: '8' + distribution: 'adopt' + + - name: Run unit tests + run: mvn -B test -Dfmt.skip=true -Dclirr.skip=true -Danimal.sniffer.skip=true + + - name: Copy test results + if: success() || failure() + run: | + shopt -s globstar + mkdir unit + cp --parents ./**/target/*-reports/*.xml unit/ + + - name: Upload test results + uses: actions/upload-artifact@v2 + if: success() || failure() + with: + name: test-results + path: "*/**/target/*-reports/*.xml" + + setup-integration-tests: + name: Setup ITs + runs-on: ubuntu-latest + timeout-minutes: 2 + + steps: + - name: Checkout source + uses: actions/checkout@v2 + + - name: Setup Python 3 + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Fetch Scylla and Cassandra versions + id: fetch-versions + run: | + pip3 install -r ci/requirements.txt + echo "::set-output name=scylla-integration-tests-versions::$(python3 ci/version_fetch.py scylla-oss-stable:2 scylla-oss-rc scylla-enterprise-stable:2 scylla-enterprise-rc)" + echo "::set-output name=cassandra-integration-tests-versions::$(python3 
ci/version_fetch.py cassandra3-stable:1)" + + outputs: + scylla-integration-tests-versions: ${{ steps.fetch-versions.outputs.scylla-integration-tests-versions }} + cassandra-integration-tests-versions: ${{ steps.fetch-versions.outputs.cassandra-integration-tests-versions }} + + cassandra-integration-tests: + name: Cassandra ITs + runs-on: ubuntu-latest + needs: [setup-integration-tests] + timeout-minutes: 90 + + strategy: + matrix: + cassandra-version: ${{ fromJson(needs.setup-integration-tests.outputs.cassandra-integration-tests-versions) }} + fail-fast: false + + steps: + - name: Checkout source + uses: actions/checkout@v2 + + - name: Set up JDK 8 + uses: actions/setup-java@v2 + with: + java-version: '8' + distribution: 'adopt' + + - name: Setup Python 3 + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Setup environment + run: | + sudo sh -c "echo 'deb http://security.ubuntu.com/ubuntu xenial-security main' >> /etc/apt/sources.list" + sudo apt-get update + sudo apt-get install libssl1.0.0 + pip3 install https://github.com/scylladb/scylla-ccm/archive/master.zip + + - name: Run integration tests on Cassandra (${{ matrix.cassandra-version }}) + run: mvn -B verify -Pshort -Dcassandra.version=${{ matrix.cassandra-version }} -Dfmt.skip=true -Dclirr.skip=true -Danimal.sniffer.skip=true + + - name: Copy test results + if: success() || failure() + run: | + shopt -s globstar + mkdir cassandra-${{ matrix.cassandra-version }} + cp --parents ./**/target/*-reports/*.xml cassandra-${{ matrix.cassandra-version }}/ + + - name: Upload test results + uses: actions/upload-artifact@v2 + if: success() || failure() + with: + name: test-results + path: "*/**/target/*-reports/*.xml" + + - name: Upload CCM logs + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: ccm-logs-cassandra-${{ matrix.cassandra-version }} + path: /tmp/*-0/ccm*/node*/logs/* + + scylla-integration-tests: + name: Scylla ITs + runs-on: ubuntu-latest + needs: 
[setup-integration-tests] + timeout-minutes: 90 + + strategy: + matrix: + scylla-version: ${{ fromJson(needs.setup-integration-tests.outputs.scylla-integration-tests-versions) }} + fail-fast: false + + steps: + - name: Checkout source + uses: actions/checkout@v2 + + - name: Set up JDK 8 + uses: actions/setup-java@v2 + with: + java-version: '8' + distribution: 'adopt' + + - name: Setup Python 3 + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Setup environment + run: | + sudo sh -c "echo 'deb http://security.ubuntu.com/ubuntu xenial-security main' >> /etc/apt/sources.list" + sudo apt-get update + sudo apt-get install libssl1.0.0 + pip3 install https://github.com/scylladb/scylla-ccm/archive/master.zip + sudo sh -c "echo 2097152 >> /proc/sys/fs/aio-max-nr" + + - name: Run integration tests on Scylla (${{ matrix.scylla-version }}) + run: mvn -B verify -Pshort -Dscylla.version=${{ matrix.scylla-version }} -Dfmt.skip=true -Dclirr.skip=true -Danimal.sniffer.skip=true + + - name: Copy test results + if: success() || failure() + run: | + shopt -s globstar + mkdir scylla-${{ matrix.scylla-version }} + cp --parents ./**/target/*-reports/*.xml scylla-${{ matrix.scylla-version }}/ + + - name: Upload test results + uses: actions/upload-artifact@v2 + if: success() || failure() + with: + name: test-results + path: "*/**/target/*-reports/*.xml" + + - name: Upload CCM logs + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: ccm-logs-scylla-${{ matrix.scylla-version }} + path: /tmp/*-0/ccm*/node*/logs/* diff --git a/.gitorderfile b/.gitorderfile new file mode 100644 index 00000000000..e69de29bb2d diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c5450ce3cd4..be63f984c2b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,10 +22,12 @@ imports in ASCII sort order. In addition, please avoid using wildcard imports. 
## Working on an issue -Before starting to work on something, please comment in JIRA or ask on the mailing list -to make sure nobody else is working on it. +We use [github issues](https://github.com/scylladb/java-driver/issues) to track ongoing issues. +Before starting to work on something, please check the issues list to make sure nobody else is working on it. +It's also a good idea to get in contact through [ScyllaDB-Users Slack](https://scylladb-users.slack.com/) +to make your intentions known and clear. -If your fix applies to multiple branches, base your work on the lowest active branch. Since version 3 of the driver, +If your fix applies to multiple branches, base your work on the lowest active branch. Most of the time if you want to implement a feature for driver version 3, then you'll base your work on `scylla-3.x` (and `scylla-4.x` for version 4). Since version 3 of the driver, we've adopted [semantic versioning](http://semver.org/) and our branches use the following scheme: ``` @@ -59,15 +61,13 @@ Before you send your pull request, make sure that: - you have a unit test that failed before the fix and succeeds after. - the fix is mentioned in `changelog/README.md`. -- the commit message include the reference of the JIRA ticket for automatic linking - (example: `JAVA-503: Fix NPE when a connection fails during pool construction.`). +- the commit message includes the reference of the github issue for + automatic linking + (example: `Fixes #1234`). As long as your pull request is not merged, it's OK to rebase your branch and push with -`--force`. - -If you want to contribute but don't have a specific issue in mind, the [lhf](https://datastax-oss.atlassian.net/secure/IssueNavigator.jspa?reset=true&mode=hide&jqlQuery=project%20%3D%20JAVA%20AND%20status%20in%20(Open%2C%20Reopened)%20AND%20labels%20%3D%20lhf) -label in JIRA is a good place to start: it marks "low hanging fruits" that don't require -in-depth knowledge of the codebase. +`--force`. 
Commit history should be as flat as reasonably possible. Multiple commits where each one represents a single logical piece of pull request are fine. +If you want to contribute but don't have a specific issue in mind, it's best to reach out through users slack. ## Editor configuration @@ -89,9 +89,17 @@ The Maven build uses profiles named after the categories to choose which tests t mvn test -Pshort ``` -The default is "unit". Each profile runs the ones before it ("short" runs unit, etc.) +The default is "unit". Each profile runs only their own category ("short" will *not* run "unit"). + +Integration tests use [CCM](https://github.com/pcmanus/ccm) to bootstrap Cassandra instances. It is recommended to +setup [Scylla CCM](https://github.com/scylladb/scylla-ccm) in its place: +``` +pip3 install https://github.com/scylladb/scylla-ccm/archive/master.zip +``` + +The SSL tests use `libssl.1.0.0.so`. Before starting the tests, make sure it is installed on your system +(`compat-openssl10` on Fedora and `libssl1.0.0` on Ubuntu, `xenial-security` repository source). -Integration tests use [CCM](https://github.com/pcmanus/ccm) to bootstrap Cassandra instances. Two Maven properties control its execution: - `cassandra.version`: the Cassandra version. This has a default value in the root POM, @@ -99,6 +107,13 @@ Two Maven properties control its execution: - `ipprefix`: the prefix of the IP addresses that the Cassandra instances will bind to (see below). This defaults to `127.0.1.`. +Additionally `-Dscylla.version=${{ matrix.scylla-version }}` can be used instead with Scylla CCM to test against Scylla. + +Examples: +- `mvn test -Pshort -Dcassandra.version=3.11.11` +- `mvn test -Plong -Dcassandra.version=3.11.11` +- `mvn verify -Plong -Dscylla.version=4.3.6` + CCM launches multiple Cassandra instances on localhost by binding to different addresses. The driver uses up to 10 different instances (127.0.1.1 to 127.0.1.10 with the default prefix). 
diff --git a/README-dev.md b/README-dev.md new file mode 100644 index 00000000000..5da38fd77da --- /dev/null +++ b/README-dev.md @@ -0,0 +1,17 @@ +# Building the docs + +The docs build instructions have been tested with Sphinx 4 and Fedora 32. + +## Prerequisites + +To build and preview the docs locally, you will need to install the following software: + +- Git +- Python 3.7 +- pip +- Java JDK 8 or above +- Maven + +## Commands + +For more information, see [Commands](https://sphinx-theme.scylladb.com/stable/commands.html). diff --git a/README-dev.rst b/README-dev.rst deleted file mode 100644 index 98e6f0b573c..00000000000 --- a/README-dev.rst +++ /dev/null @@ -1,68 +0,0 @@ -Building the Docs -================= - -*Note*: The docs build instructions have been tested with Sphinx 2.4.4 and Fedora 32. - -To build and preview the docs locally, you will need to install the following software: - -- `Git `_ -- `Python 3.7 `_ -- `pip `_ -- Java JDK 6 or above -- Maven - -Run the following command to build the docs. - -.. code:: console - - cd docs - make preview - -Once the command completes processing, open http://127.0.0.1:5500/ with your preferred browser. - -Building multiple documentation versions -======================================== - -Build Sphinx docs for all the versions defined in ``docs/conf.py``. - -.. code:: console - - cd docs - make multiversion - -Then, open ``docs/_build/dirhtml//index.html`` with your preferred browser. - -**NOTE:** If you only can see docs generated for the master branch, try to run ``git fetch --tags`` to download the latest tags from remote. - -Defining supported versions -=========================== - -Let's say you want to generate docs for the new version ``scylla-3.x.y``. - -1. The file ``.github/workflows`` defines the branch from where all the documentation versions will be build. - -.. code:: yaml - - on: - push: - branches: - - scylla-3.x - -In our case, this branch currently is``scylla-3.x``. 
-In practice, this means that the file ``docs/source/conf.py`` of ```scylla-3.x`` defines which documentation versions are supported. - -2. In the file ``docs/source/conf.py`` (``scylla-3.x`` branch), list the new target version support inside the ``BRANCHES`` array. -For example, listing ``scylla-3.x.y`` should look like in your code: - -.. code:: python - - BRANCHES = ['scylla-3.x.y'] - smv_branch_whitelist = multiversion_regex_builder(BRANCHES) - -3. (optional) If the new version is the latest stable version, update as well the variable ``smv_latest_version`` in ``docs/source/conf.py``. - -.. code:: python - - smv_latest_version = 'scylla-3.x.y' - -4. Commit & push the changes to the ``scylla-3.x`` branch. diff --git a/README.md b/README.md index 001d420a60f..5f17898f6c5 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ not yet have been released. You can find the documentation for the latest version through the [Java driver docs](https://docs.scylladb.com/using-scylla/scylla-java-driver/) or via the release tags, [e.g. -3.10.2.0](https://github.com/scylladb/java-driver/releases/tag/3.10.2.0).* +3.11.2.0](https://github.com/scylladb/java-driver/releases/tag/3.11.2.0).* A modern, [feature-rich](manual/) and highly tunable Java client library for Apache Cassandra (2.1+) and using exclusively Cassandra's binary protocol @@ -62,7 +62,7 @@ it in your application using the following Maven dependency com.scylladb scylla-driver-core - 3.10.2.0 + 3.11.2.0 ``` @@ -72,7 +72,7 @@ Note that the object mapper is published as a separate artifact: com.scylladb scylla-driver-mapping - 3.10.2.0 + 3.11.2.0 ``` @@ -82,7 +82,7 @@ The 'extras' module is also published as a separate artifact: com.scylladb scylla-driver-extras - 3.10.2.0 + 3.11.2.0 ``` @@ -92,8 +92,8 @@ to avoid the explicit dependency to Netty. ## Compatibility -The Java client driver 3.10.2.0 ([branch 3.x](https://github.com/scylladb/java-driver/tree/3.x)) is compatible with Apache -Cassandra 2.1, 2.2 and 3.0+. 
+The Java client driver 3.11.2.0 ([branch 3.x](https://github.com/scylladb/java-driver/tree/3.x)) is compatible with +Scylla and Apache Cassandra 2.1, 2.2, 3.0+. UDT and tuple support is available only when using Apache Cassandra 2.1 or higher. diff --git a/changelog/README.md b/changelog/README.md index 1ba799ce701..971460f7010 100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -5,6 +5,24 @@ 3.x versions get published. --> +### 3.11.2 +- [improvement] JAVA-3008: Upgrade Netty to 4.1.75, 3.x edition +- [improvement] JAVA-2984: Upgrade Jackson to resolve high-priority CVEs + + +### 3.11.1 +- [bug] JAVA-2967: Support native transport peer information for DSE 6.8. +- [bug] JAVA-2976: Support missing protocol v5 error codes CAS_WRITE_UNKNOWN, CDC_WRITE_FAILURE. + + +### 3.11.0 + +- [improvement] JAVA-2705: Remove protocol v5 beta status, add v6-beta. +- [bug] JAVA-2923: Detect and use Guava's new HostAndPort.getHost method. +- [bug] JAVA-2922: Switch to modern framing format inside a channel handler. +- [bug] JAVA-2924: Consider protocol version unsupported when server requires USE_BETA flag for it. + + ### 3.10.2 - [bug] JAVA-2860: Avoid NPE if channel initialization crashes. @@ -16,8 +34,9 @@ ### 3.10.0 -- [improvement] JAVA-2676: Don't reschedule flusher after empty runs -- [new feature] JAVA-2772: Support new protocol v5 message format +- [improvement] JAVA-2676: Don't reschedule flusher after empty runs. +- [new feature] JAVA-2772: Support new protocol v5 message format. 
+ ### 3.9.0 diff --git a/ci/appveyor.ps1 b/ci/appveyor.ps1 deleted file mode 100644 index bc1d95b69f7..00000000000 --- a/ci/appveyor.ps1 +++ /dev/null @@ -1,132 +0,0 @@ -Add-Type -AssemblyName System.IO.Compression.FileSystem - -$dep_dir="C:\Users\appveyor\deps" -If (!(Test-Path $dep_dir)) { - Write-Host "Creating $($dep_dir)" - New-Item -Path $dep_dir -ItemType Directory -Force -} - -$apr_platform = "Win32" -$openssl_platform = "Win32" -$vc_platform = "x86" -$env:PYTHON="C:\Python27" -$env:OPENSSL_PATH="C:\OpenSSL-Win32" -If ($env:PLATFORM -eq "X64") { - $apr_platform = "x64" - $vc_platform = "x64" - $env:PYTHON="C:\Python27-x64" - $env:OPENSSL_PATH="C:\OpenSSL-Win64" -} - -$env:JAVA_HOME="C:\Program Files\Java\jdk$($env:java_version)" -# The configured java version to test with. -$env:JAVA_PLATFORM_HOME="$($env:JAVA_HOME)" -$env:JAVA_8_HOME="C:\Program Files\Java\jdk1.8.0" -$env:PATH="$($env:PYTHON);$($env:PYTHON)\Scripts;$($env:JAVA_HOME)\bin;$($env:OPENSSL_PATH)\bin;$($env:PATH)" -$env:CCM_PATH="$($dep_dir)\ccm" - -$apr_dist_path = "$($dep_dir)\apr" -# Build APR if it hasn't been previously built. 
-If (!(Test-Path $apr_dist_path)) { - Write-Host "Cloning APR" - $apr_path = "C:\Users\appveyor\apr" - Start-Process git -ArgumentList "clone --branch=1.5.2 --depth=1 https://github.com/apache/apr.git $($apr_path)" -Wait -nnw - Write-Host "Setting Visual Studio Environment to VS 2015" - Push-Location "$($env:VS140COMNTOOLS)\..\..\VC" - cmd /c "vcvarsall.bat $vc_platform & set" | - foreach { - if ($_ -match "=") { - $v = $_.split("="); Set-Item -force -path "ENV:\$($v[0])" -value "$($v[1])" - } - } - Pop-Location - Write-Host "Building APR (an error may be printed, but it will still build)" - Push-Location $($apr_path) - cmd /c nmake -f Makefile.win ARCH="$apr_platform Release" PREFIX=$($apr_dist_path) buildall install - Pop-Location - Write-Host "Done Building APR" -} -$env:PATH="$($apr_dist_path)\bin;$($env:PATH)" - -# Install Ant and Maven -$ant_base = "$($dep_dir)\ant" -$ant_path = "$($ant_base)\apache-ant-1.9.7" -If (!(Test-Path $ant_path)) { - Write-Host "Installing Ant" - $ant_url = "https://www.dropbox.com/s/lgx95x1jr6s787l/apache-ant-1.9.7-bin.zip?dl=1" - $ant_zip = "C:\Users\appveyor\apache-ant-1.9.7-bin.zip" - (new-object System.Net.WebClient).DownloadFile($ant_url, $ant_zip) - [System.IO.Compression.ZipFile]::ExtractToDirectory($ant_zip, $ant_base) -} -$env:PATH="$($ant_path)\bin;$($env:PATH)" - -$maven_base = "$($dep_dir)\maven" -$maven_path = "$($maven_base)\apache-maven-3.2.5" -If (!(Test-Path $maven_path)) { - Write-Host "Installing Maven" - $maven_url = "https://www.dropbox.com/s/fh9kffmexprsmha/apache-maven-3.2.5-bin.zip?dl=1" - $maven_zip = "C:\Users\appveyor\apache-maven-3.2.5-bin.zip" - (new-object System.Net.WebClient).DownloadFile($maven_url, $maven_zip) - [System.IO.Compression.ZipFile]::ExtractToDirectory($maven_zip, $maven_base) -} -$env:M2_HOME="$($maven_path)" -$env:PATH="$($maven_path)\bin;$($env:PATH)" - -$jdks = @("1.6.0", "1.7.0", "1.8.0") -foreach ($jdk in $jdks) { - $java_dir = "C:\Program Files\Java\jdk$jdk" - $jce_target = 
"$java_dir\jre\lib\security" - $jce_indicator = "$jce_target\README.txt" - # Install Java Cryptographic Extensions, needed for SSL. - # If this file doesn't exist we know JCE hasn't been installed. - If (!(Test-Path $jce_indicator)) { - Write-Host "Installing JCE for $jdk" - $zip = "$dep_dir\jce_policy-$jdk.zip" - $url = "https://www.dropbox.com/s/po4308hlwulpvep/UnlimitedJCEPolicyJDK7.zip?dl=1" - $extract_folder = "UnlimitedJCEPolicy" - If ($jdk -eq "1.8.0") { - $url = "https://www.dropbox.com/s/al1e6e92cjdv7m7/jce_policy-8.zip?dl=1" - $extract_folder = "UnlimitedJCEPolicyJDK8" - } - ElseIf ($jdk -eq "1.6.0") { - $url = "https://www.dropbox.com/s/dhrtucxcif4n11k/jce_policy-6.zip?dl=1" - $extract_folder = "jce" - } - # Download zip to staging area if it doesn't exist, we do this because - # we extract it to the directory based on the platform and we want to cache - # this file so it can apply to all platforms. - if(!(Test-Path $zip)) { - (new-object System.Net.WebClient).DownloadFile($url, $zip) - } - - [System.IO.Compression.ZipFile]::ExtractToDirectory($zip, $jce_target) - - $jcePolicyDir = "$jce_target\$extract_folder" - Move-Item $jcePolicyDir\* $jce_target\ -force - Remove-Item $jcePolicyDir - } -} - -# Install Python Dependencies for CCM. -Write-Host "Installing Python Dependencies for CCM" -Start-Process python -ArgumentList "-m pip install psutil pyYaml six" -Wait -nnw - -# Clone ccm from git and use master. -If (!(Test-Path $env:CCM_PATH)) { - Write-Host "Cloning CCM" - Start-Process git -ArgumentList "clone https://github.com/pcmanus/ccm.git $($env:CCM_PATH)" -Wait -nnw -} - -# Copy ccm -> ccm.py so windows knows to run it. -If (!(Test-Path $env:CCM_PATH\ccm.py)) { - Copy-Item "$env:CCM_PATH\ccm" "$env:CCM_PATH\ccm.py" -} -$env:PYTHONPATH="$($env:CCM_PATH);$($env:PYTHONPATH)" -$env:PATH="$($env:CCM_PATH);$($env:PATH)" - -# Predownload cassandra version for CCM if it isn't already downloaded. 
-If (!(Test-Path C:\Users\appveyor\.ccm\repository\$env:cassandra_version)) { - Write-Host "Preinstalling C* $($env:cassandra_version)" - Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py create -v $($env:cassandra_version) -n 1 predownload" -Wait -nnw - Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py remove predownload" -Wait -nnw -} diff --git a/ci/appveyor.yml b/ci/appveyor.yml deleted file mode 100644 index 81dd5b01958..00000000000 --- a/ci/appveyor.yml +++ /dev/null @@ -1,20 +0,0 @@ -environment: - test_profile: default - matrix: - - java_version: 1.6.0 - - java_version: 1.7.0 - - java_version: 1.8.0 - test_profile: short -platform: x64 -install: - - ps: .\ci\appveyor.ps1 -build_script: - - "set \"JAVA_HOME=%JAVA_8_HOME%\" && mvn install -DskipTests=true -B -V" -test_script: - - "set \"JAVA_HOME=%JAVA_PLATFORM_HOME%\" && mvn -B -D\"ccm.java.home\"=\"%JAVA_8_HOME%\" -D\"ccm.maxNumberOfNodes\"=1 -D\"cassandra.version\"=%cassandra_version% verify -P %test_profile%" -on_finish: - - ps: .\ci\uploadtests.ps1 -cache: - - C:\Users\appveyor\.m2 - - C:\Users\appveyor\.ccm\repository - - C:\Users\appveyor\deps -> .\ci\appveyor.ps1 diff --git a/ci/requirements.txt b/ci/requirements.txt new file mode 100644 index 00000000000..663bd1f6a2a --- /dev/null +++ b/ci/requirements.txt @@ -0,0 +1 @@ +requests \ No newline at end of file diff --git a/ci/uploadtests.ps1 b/ci/uploadtests.ps1 deleted file mode 100644 index cf88b16229c..00000000000 --- a/ci/uploadtests.ps1 +++ /dev/null @@ -1,17 +0,0 @@ -$testResults=Get-ChildItem TEST-TestSuite.xml -Recurse - -Write-Host "Uploading test results." - -$url = "https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)" -$wc = New-Object 'System.Net.WebClient' - -foreach ($testResult in $testResults) { - try { - Write-Host -ForegroundColor Green "Uploading $testResult -> $url." 
- $wc.UploadFile($url, $testResult) - } catch [Net.WebException] { - Write-Host -ForegroundColor Red "Failed Uploading $testResult -> $url. $_" - } -} - -Write-Host "Done uploading test results." diff --git a/ci/version_fetch.py b/ci/version_fetch.py new file mode 100644 index 00000000000..04a690baf26 --- /dev/null +++ b/ci/version_fetch.py @@ -0,0 +1,208 @@ +#!/usr/bin/python3 + +""" +This Python script allows you to list the +latest version numbers of Scylla and Cassandra. + +You can specify whether you want the +versions of Scylla OSS or Scylla Enterprise, +either N latest stable X.Y.latest or +all non-obsolete RCs. You can also fetch +the latest version of Cassandra 3. + +How are those versions fetched? We use Docker Hub +tags API. +""" + +import requests +import re +import json +from itertools import groupby, islice +import sys + +DOCKER_HUB_TAGS_ENDPOINT = 'https://registry.hub.docker.com/v1/repositories/%s/tags' +DOCKER_HUB_SCYLLA_ORG = 'scylladb/' + +SCYLLA_OSS = DOCKER_HUB_SCYLLA_ORG + 'scylla' +SCYLLA_OSS_RELEASED_VERSION_REGEX = re.compile(r'(\d+)\.(\d+)\.(\d+)') +SCYLLA_OSS_RC_VERSION_REGEX = re.compile(r'(\d+)\.(\d+)\.rc(\d+)') + +SCYLLA_ENTERPRISE = DOCKER_HUB_SCYLLA_ORG + 'scylla-enterprise' +SCYLLA_ENTERPRISE_RELEASED_VERSION_REGEX = re.compile(r'(\d{4})\.(\d+)\.(\d+)') +SCYLLA_ENTERPRISE_RC_VERSION_REGEX = re.compile(r'(\d{4})\.(\d+)\.rc(\d+)') + +CASSANDRA_ENDPOINT = 'https://dlcdn.apache.org/cassandra/' + +CASSANDRA3_REGEX = re.compile(r'a href="(3)\.(\d+)\.(\d+)/"') + +COMMAND_LINE_ARGUMENT = re.compile( + r'((?:(scylla-oss-stable):(\d+))|(?:(scylla-enterprise-stable):(\d+))|(?:(cassandra3-stable):(\d+))|(?:(scylla-oss-rc))|(?:(scylla-enterprise-rc)))') + + +def fetch_last_scylla_oss_minor_versions(count): + # Download Docker tags for repository + tags_data = requests.get(DOCKER_HUB_TAGS_ENDPOINT % (SCYLLA_OSS)).json() + tags_data = map(lambda e: e['name'], tags_data) + + # Parse only those tags which match 'NUM.NUM.NUM' + # into tuple (NUM, 
NUM, NUM) + tags_data = filter(SCYLLA_OSS_RELEASED_VERSION_REGEX.fullmatch, tags_data) + tags_data = map(lambda e: SCYLLA_OSS_RELEASED_VERSION_REGEX.match( + e).groups(), tags_data) + tags_data = map(lambda e: tuple(map(int, e)), tags_data) + + # Group by (major, minor) and select latest patch version + tags_data = sorted(tags_data) + tags_data = groupby(tags_data, key=lambda e: (e[0], e[1])) + tags_data = ((e[0][0], e[0][1], max(e[1])[2]) + for e in tags_data) + + # Return the latest ones + tags_data = list(tags_data)[-count:] + tags_data = [f'{e[0]}.{e[1]}.{e[2]}' for e in tags_data] + return tags_data + + +def fetch_all_scylla_oss_rc_versions(): + # Download Docker tags for repository + tags_data = requests.get(DOCKER_HUB_TAGS_ENDPOINT % (SCYLLA_OSS)).json() + tags_data = list(map(lambda e: e['name'], tags_data)) + + # Parse only those tags which match 'NUM.NUM.rcNUM' + # into tuple (NUM, NUM, NUM) + rc_tags_data = filter(SCYLLA_OSS_RC_VERSION_REGEX.fullmatch, tags_data) + rc_tags_data = map(lambda e: SCYLLA_OSS_RC_VERSION_REGEX.match( + e).groups(), rc_tags_data) + rc_tags_data = map(lambda e: tuple(map(int, e)), rc_tags_data) + + # Parse only those tags which match 'NUM.NUM.NUM' + # into tuple (NUM, NUM) + stable_tags_data = filter( + SCYLLA_OSS_RELEASED_VERSION_REGEX.fullmatch, tags_data) + stable_tags_data = map(lambda e: SCYLLA_OSS_RELEASED_VERSION_REGEX.match( + e).groups(), stable_tags_data) + stable_tags_data = map(lambda e: tuple(map(int, e[0:2])), stable_tags_data) + stable_tags_data = set(stable_tags_data) + + # Group by (major, minor) and select latest RC version + rc_tags_data = sorted(rc_tags_data) + rc_tags_data = groupby(rc_tags_data, key=lambda e: (e[0], e[1])) + rc_tags_data = ((e[0][0], e[0][1], max(e[1])[2]) + for e in rc_tags_data) + + # Filter out those RCs that are obsoleted by released stable version + rc_tags_data = filter(lambda e: ( + e[0], e[1]) not in stable_tags_data, rc_tags_data) + rc_tags_data = [f'{e[0]}.{e[1]}.rc{e[2]}' for e 
in rc_tags_data] + return rc_tags_data + + +def fetch_last_scylla_enterprise_minor_versions(count): + # Download Docker tags for repository + tags_data = requests.get(DOCKER_HUB_TAGS_ENDPOINT % + (SCYLLA_ENTERPRISE)).json() + tags_data = map(lambda e: e['name'], tags_data) + + # Parse only those tags which match 'YEAR.NUM.NUM' + # into tuple (YEAR, NUM, NUM) + tags_data = filter( + SCYLLA_ENTERPRISE_RELEASED_VERSION_REGEX.fullmatch, tags_data) + tags_data = map(lambda e: SCYLLA_ENTERPRISE_RELEASED_VERSION_REGEX.match( + e).groups(), tags_data) + tags_data = map(lambda e: tuple(map(int, e)), tags_data) + + # Group by (major, minor) and select latest patch version + tags_data = sorted(tags_data) + tags_data = groupby(tags_data, key=lambda e: (e[0], e[1])) + tags_data = ((e[0][0], e[0][1], max(e[1])[2]) + for e in tags_data) + + # Return the latest ones + tags_data = list(tags_data)[-count:] + tags_data = [f'{e[0]}.{e[1]}.{e[2]}' for e in tags_data] + return tags_data + + +def fetch_all_scylla_enterprise_rc_versions(): + # Download Docker tags for repository + tags_data = requests.get(DOCKER_HUB_TAGS_ENDPOINT % + (SCYLLA_ENTERPRISE)).json() + tags_data = list(map(lambda e: e['name'], tags_data)) + + # Parse only those tags which match 'YEAR.NUM.rcNUM' + # into tuple (YEAR, NUM, NUM) + rc_tags_data = filter( + SCYLLA_ENTERPRISE_RC_VERSION_REGEX.fullmatch, tags_data) + rc_tags_data = map(lambda e: SCYLLA_ENTERPRISE_RC_VERSION_REGEX.match( + e).groups(), rc_tags_data) + rc_tags_data = map(lambda e: tuple(map(int, e)), rc_tags_data) + + # Parse only those tags which match 'YEAR.NUM.NUM' + # into tuple (YEAR, NUM) + stable_tags_data = filter( + SCYLLA_ENTERPRISE_RELEASED_VERSION_REGEX.fullmatch, tags_data) + stable_tags_data = map(lambda e: SCYLLA_ENTERPRISE_RELEASED_VERSION_REGEX.match( + e).groups(), stable_tags_data) + stable_tags_data = map(lambda e: tuple(map(int, e[0:2])), stable_tags_data) + + # Group by (major, minor) and select latest RC version + rc_tags_data = 
sorted(rc_tags_data) + rc_tags_data = groupby(rc_tags_data, key=lambda e: (e[0], e[1])) + rc_tags_data = ((e[0][0], e[0][1], max(e[1])[2]) + for e in rc_tags_data) + + # Filter out those RCs that are obsoleted by released stable version + rc_tags_data = filter(lambda e: ( + e[0], e[1]) not in stable_tags_data, rc_tags_data) + rc_tags_data = [f'{e[0]}.{e[1]}.rc{e[2]}' for e in rc_tags_data] + return rc_tags_data + + +def fetch_last_cassandra3_minor_versions(count): + # Download folder listing for Cassandra download site + data = requests.get(CASSANDRA_ENDPOINT).text + + # Parse only those version numbers which match '3.NUM.NUM' + # into tuple (3, NUM, NUM) + data = CASSANDRA3_REGEX.finditer(data) + data = map(lambda e: e.groups(), data) + data = map(lambda e: tuple(map(int, e)), data) + + # Group by (major, minor) and select latest patch version + data = sorted(data) + data = groupby(data, key=lambda e: (e[0], e[1])) + data = ((e[0][0], e[0][1], max(e[1])[2]) + for e in data) + + # Return the latest ones + data = list(data)[-count:] + data = [f'{e[0]}.{e[1]}.{e[2]}' for e in data] + return data + + +if __name__ == '__main__': + names = set() + + for arg in sys.argv[1:]: + if not COMMAND_LINE_ARGUMENT.fullmatch(arg): + print("Usage:", sys.argv[0], "[scylla-oss-stable:COUNT] [scylla-oss-rc] [scylla-enterprise-stable:COUNT] [scylla-enterprise-rc] [cassandra3-stable:COUNT]...", file=sys.stderr) + sys.exit(1) + + groups = COMMAND_LINE_ARGUMENT.match(arg).groups() + groups = [g for g in groups if g][1:] + + mode_name = groups[0] + if mode_name == 'scylla-oss-stable': + names.update(fetch_last_scylla_oss_minor_versions(int(groups[1]))) + elif mode_name == 'scylla-enterprise-stable': + names.update( + fetch_last_scylla_enterprise_minor_versions(int(groups[1]))) + elif mode_name == 'cassandra3-stable': + names.update( + fetch_last_cassandra3_minor_versions(int(groups[1]))) + elif mode_name == 'scylla-oss-rc': + names.update(fetch_all_scylla_oss_rc_versions()) + elif 
mode_name == 'scylla-enterprise-rc': + names.update(fetch_all_scylla_enterprise_rc_versions()) + + print(json.dumps(list(names))) diff --git a/docs.yaml b/docs.yaml deleted file mode 100644 index b38b86397d7..00000000000 --- a/docs.yaml +++ /dev/null @@ -1,67 +0,0 @@ -title: Java Driver for Apache Cassandra -summary: High performance Java client for Apache Cassandra -homepage: http://datastax.github.io/java-driver/ -theme: datastax -sections: - - title: Manual - prefix: /manual - sources: - - type: markdown - files: 'manual/**/*.md' - # The 'manual' section was called 'features' in older releases. Leave both - # definitions and Documentor will pick up whichever exists and ignore the - # other. - - title: Features - prefix: /features - sources: - - type: markdown - files: 'features/**/*.md' - - title: Changelog - prefix: /changelog - sources: - - type: markdown - files: 'changelog/**/*.md' - - title: Upgrading - prefix: /upgrade_guide - sources: - - type: markdown - files: 'upgrade_guide/**/*.md' - - title: FAQ - prefix: /faq - sources: - - type: markdown - files: 'faq/**/*.md' -links: - - title: Code - href: https://github.com/datastax/java-driver/ - - title: Docs - href: http://docs.datastax.com/en/developer/java-driver/ - - title: Issues - href: https://datastax-oss.atlassian.net/browse/JAVA/ - - title: Mailing List - href: https://groups.google.com/a/lists.datastax.com/forum/#!forum/java-driver-user - - title: Releases - href: http://downloads.datastax.com/java-driver/ -api_docs: - 3.3: http://docs.datastax.com/en/drivers/java/3.3 - 4.0-alpha: http://docs.datastax.com/en/drivers/java/4.0 - 3.2: http://docs.datastax.com/en/drivers/java/3.2 - 3.1: http://docs.datastax.com/en/drivers/java/3.1 - 3.0: http://docs.datastax.com/en/drivers/java/3.0 - 2.1: http://docs.datastax.com/en/drivers/java/2.1 - 2.0: http://docs.datastax.com/en/drivers/java/2.0 -versions: - - name: '3.3' - ref: '3.3.0' - - name: '4.0-alpha' - ref: '9f0edeb' - - name: '3.2' - ref: '3.2_docfixes' - 
- name: '3.1' - ref: '3.1_docfixes' - - name: '3.0' - ref: '3.0_docfixes' - - name: '2.1' - ref: '2.1.10.3' - - name: '2.0' - ref: '2.0.12.3' diff --git a/docs/Makefile b/docs/Makefile index 79bbe97a0dc..9b2c899ab50 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,81 +1,111 @@ # You can set these variables from the command line. -POETRY = $(HOME)/.poetry/bin/poetry +POETRY = poetry SPHINXOPTS = SPHINXBUILD = $(POETRY) run sphinx-build PAPER = BUILDDIR = _build -SOURCE_DIR = _source +SOURCEDIR = _source -# Internal variables. +# Internal variables PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCE_DIR) -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) +TESTSPHINXOPTS = $(ALLSPHINXOPTS) -W --keep-going +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) + +# Windows variables +ifeq ($(OS),Windows_NT) + POETRY = $(APPDATA)\Python\Scripts\poetry +endif + +define javadoc + cd .. && ./docs/_utils/javadoc.sh +endef .PHONY: all all: dirhtml +# Setup commands +.PHONY: setupenv +setupenv: + pip install -q poetry + +.PHONY: setup +setup: + $(POETRY) install + $(POETRY) update + cp -TLr source $(SOURCEDIR) + cd $(SOURCEDIR) && find . -name README.md -execdir mv '{}' index.md ';' + +# Clean commands .PHONY: pristine pristine: clean git clean -dfX -.PHONY: setup -setup: - ./_utils/setup.sh - cp -TLr source $(SOURCE_DIR) - cd $(SOURCE_DIR) && find . -name README.md -execdir mv '{}' index.md ';' .PHONY: clean clean: rm -rf $(BUILDDIR)/* - rm -rf $(SOURCE_DIR)/* - -.PHONY: preview -preview: setup - cd .. 
&& ./docs/_utils/javadoc.sh - $(POETRY) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --port 5500 + rm -rf $(SOURCEDIR)/* +# Generate output commands .PHONY: dirhtml dirhtml: setup $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." -.PHONY: singlehtml -singlehtml: setup - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml +.PHONY: javadoc +javadoc: setup + @$(javadoc) @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +.PHONY: multiversion +multiversion: setup + $(POETRY) run ./_utils/multiversion.sh + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: epub -epub: setup +epub: setup $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 -epub3: setup +epub3:setup $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: dummy -dummy: setup +dummy: setup $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy @echo @echo "Build finished. Dummy builder generates no files." 
-.PHONY: linkcheck -linkcheck: setup - $(SPHINXBUILD) -b linkcheck $(SOURCE_DIR) $(BUILDDIR)/linkcheck +# Preview commands +.PHONY: preview +preview: setup + $(POETRY) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --port 5500 --re-ignore 'api/*' -.PHONY: multiversion -multiversion: setup - @mkdir -p $(HOME)/.cache/pypoetry/virtualenvs - $(POETRY) run ./_utils/multiversion.sh +.PHONY: multiversionpreview +multiversionpreview: multiversion + $(POETRY) run python -m http.server 5500 --directory $(BUILDDIR)/dirhtml + +.PHONY: redirects +redirects: setup + $(POETRY) run redirects-cli fromfile --yaml-file _utils/redirects.yaml --output-dir $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." -.PHONY: multiversionpreview -multiversionpreview: multiversion - $(POETRY) run python3 -m http.server 5500 --directory $(BUILDDIR)/dirhtml +# Test commands +.PHONY: test +test: setup + $(SPHINXBUILD) -b dirhtml $(TESTSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +.PHONY: linkcheck +linkcheck: setup + $(SPHINXBUILD) -b linkcheck $(SOURCEDIR) $(BUILDDIR)/linkcheck \ No newline at end of file diff --git a/docs/_utils/javadoc.sh b/docs/_utils/javadoc.sh index 5ec6ab16dc0..5fadf3954d4 100755 --- a/docs/_utils/javadoc.sh +++ b/docs/_utils/javadoc.sh @@ -1,7 +1,7 @@ #!/bin/bash # Install dependencies -mvn install -DskipTests +mvn install -DskipTests -T 1C # Define output folder OUTPUT_DIR="docs/_build/dirhtml/api" @@ -11,7 +11,7 @@ if [[ "$SPHINX_MULTIVERSION_OUTPUTDIR" != "" ]]; then fi # Generate javadoc -mvn javadoc:javadoc +mvn javadoc:javadoc -T 1C [ -d $OUTPUT_DIR ] && rm -r $OUTPUT_DIR mkdir -p "$OUTPUT_DIR" mv -f driver-core/target/site/apidocs/* $OUTPUT_DIR diff --git a/docs/_utils/multiversion.sh b/docs/_utils/multiversion.sh index 19270f32932..89895a896c6 100755 --- a/docs/_utils/multiversion.sh +++ b/docs/_utils/multiversion.sh @@ -1,5 +1,5 @@ #! 
/bin/bash cd .. && sphinx-multiversion docs/source docs/_build/dirhtml \ - --pre-build './docs/_utils/javadoc.sh' \ - --pre-build "find . -mindepth 2 -name README.md -execdir mv '{}' index.md ';'" + --pre-build "find . -mindepth 2 -name README.md -execdir mv '{}' index.md ';'" \ + --post-build './docs/_utils/javadoc.sh' diff --git a/docs/_utils/redirections.yaml b/docs/_utils/redirections.yaml deleted file mode 100644 index 0e5f1ff9e07..00000000000 --- a/docs/_utils/redirections.yaml +++ /dev/null @@ -1 +0,0 @@ -api: /api/overview-summary.html diff --git a/docs/_utils/redirects.yaml b/docs/_utils/redirects.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/_utils/setup.sh b/docs/_utils/setup.sh deleted file mode 100755 index b8f50243e4f..00000000000 --- a/docs/_utils/setup.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash - -if pwd | egrep -q '\s'; then - echo "Working directory name contains one or more spaces." - exit 1 -fi - -which python3 || { echo "Failed to find python3. 
Try installing Python for your operative system: https://www.python.org/downloads/" && exit 1; } -which poetry || curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/1.1.3/get-poetry.py | python3 - && source ${HOME}/.poetry/env -poetry install -poetry update diff --git a/docs/pyproject.toml b/docs/pyproject.toml index bb5778d26da..580711c9f9b 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -6,17 +6,15 @@ authors = ["Java Driver Contributors"] [tool.poetry.dependencies] python = "^3.7" -pyyaml = "5.3" +pyyaml = "6.0" pygments = "2.2.0" -recommonmark = "0.5.0" -sphinx-scylladb-theme = "~0.1.12" +recommonmark = "0.7.1" +redirects_cli ="~0.1.2" +sphinx-scylladb-theme = "~1.3.1" sphinx-sitemap = "2.1.0" -sphinx-autobuild = "0.7.1" -Sphinx = "2.4.4" -sphinx-multiversion-scylla = "~0.2.6" - -[tool.poetry.dev-dependencies] -pytest = "5.2" +sphinx-autobuild = "2021.3.14" +Sphinx = "4.3.2" +sphinx-multiversion-scylla = "~0.2.12" [build-system] requires = ["poetry>=0.12"] diff --git a/docs/source/conf.py b/docs/source/conf.py index 2a1ca68daea..056562c84a5 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,21 +1,27 @@ # -*- coding: utf-8 -*- import os -import sys from datetime import date -import yaml import re from docutils import nodes -from sphinx.util import logging from recommonmark.transform import AutoStructify from recommonmark.parser import CommonMarkParser, splitext, urlparse from sphinx_scylladb_theme.utils import multiversion_regex_builder -logger = logging.getLogger(__name__) # -- General configuration ------------------------------------------------ -# Add any Sphinx extension'¡' module names here, as strings. They can be +# Build documentation for the following tags and branches +TAGS = [] +BRANCHES = ['scylla-3.7.2.x', 'scylla-3.10.2.x', 'scylla-3.11.0.x', 'scylla-3.11.2.x', 'scylla-4.7.2.x', 'scylla-4.10.0.x', 'scylla-4.11.1.x', 'scylla-4.12.0.x', 'scylla-4.13.0.x'] +# Set the latest version. 
+LATEST_VERSION = 'scylla-4.13.0.x' +# Set which versions are not released yet. +UNSTABLE_VERSIONS = [] +# Set which versions are deprecated +DEPRECATED_VERSIONS = [] + +# Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ @@ -23,6 +29,7 @@ 'sphinx.ext.mathjax', 'sphinx.ext.githubpages', 'sphinx.ext.extlinks', + 'sphinx_sitemap', 'sphinx.ext.autosectionlabel', 'sphinx_scylladb_theme', 'sphinx_multiversion', @@ -77,7 +84,8 @@ def setup(app): app.add_transform(AutoStructify) # Replace DataStax links - replacements = {r'https://docs.datastax.com/en/drivers/java\/(.*?)\/': "https://java-driver.docs.scylladb.com/stable/api/"} + current_slug = os.getenv("SPHINX_MULTIVERSION_NAME", "stable") + replacements = {r'docs.datastax.com/en/drivers/java\/(.*?)\/': "java-driver.docs.scylladb.com/" + current_slug + "/api/"} app.add_config_value('replacements', replacements, True) app.connect('source-read', replace_relative_links) @@ -89,23 +97,10 @@ def setup(app): copyright = str(date.today().year) + ', ScyllaDB. All rights reserved.' author = u'Scylla Project Contributors' -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = u'3.7.1' -# The full version, including alpha/beta/rc tags. -release = u'3.7.1' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -language = None - # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'lib', 'lib64','**/_common/*', 'README.md', '.git', '.github', '_utils', '_templates', 'rst_include'] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_utils'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' @@ -113,21 +108,34 @@ def setup(app): # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True -# Custom lexer -from pygments.lexers.shell import BashLexer -from sphinx.highlighting import lexers +# -- Options for not found extension ------------------------------------------- -class DitaaLexer(BashLexer): - pass -lexers['ditaa'] = DitaaLexer(startinline=True) +# Template used to render the 404.html generated by this extension. +notfound_template = '404.html' + +# Prefix added to all the URLs generated in the 404 page. +notfound_urls_prefix = '' + +# -- Options for multiversion extension ---------------------------------- + +# Whitelist pattern for tags +smv_tag_whitelist = multiversion_regex_builder(TAGS) +# Whitelist pattern for branches +smv_branch_whitelist = multiversion_regex_builder(BRANCHES) +# Defines which version is considered to be the latest stable version. +# Must be listed in smv_tag_whitelist or smv_branch_whitelist. +smv_latest_version = LATEST_VERSION +smv_rename_latest_version = 'stable' +# Whitelist pattern for remotes (set to None to use local branches only) +smv_remote_whitelist = r'^origin$' +# Pattern for released versions +smv_released_pattern = r'^tags/.*$' +# Format for versioned output directories inside the build directory +smv_outputdir_format = '{ref.name}' -# Adds version variables for monitoring and manager versions when used in inline text +# -- Options for sitemap extension --------------------------------------- -rst_epilog = """ -.. |mon_version| replace:: 3.1 -.. |man_version| replace:: 2.0 -.. 
|mon_root| replace:: :doc:`Scylla Monitoring Stack ` -""" +sitemap_url_scheme = 'stable/{link}' # -- Options for HTML output ---------------------------------------------- @@ -135,33 +143,22 @@ class DitaaLexer(BashLexer): # a list of builtin themes. # html_theme = 'sphinx_scylladb_theme' -# html_theme_path = ["../.."] - -html_style = '' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { + 'conf_py_path': 'docs/source/', 'branch_substring_removed': 'scylla-', - 'header_links': [ - ('Scylla Java Driver', 'https://java-driver.docs.scylladb.com/'), - ('Scylla Cloud', 'https://docs.scylladb.com/scylla-cloud/'), - ('Scylla University', 'https://university.scylladb.com/'), - ('ScyllaDB Home', 'https://www.scylladb.com/')], + 'github_repository': 'scylladb/java-driver', 'github_issues_repository': 'scylladb/java-driver', - 'show_sidebar_index': True, + 'hide_edit_this_page_button': 'false', + 'versions_unstable': UNSTABLE_VERSIONS, + 'versions_deprecated': DEPRECATED_VERSIONS, 'hide_version_dropdown': ['scylla-3.x'], } -extlinks = { - 'manager': ('/operating-scylla/manager/%s/',''), - 'manager_lst': ('/operating-scylla/manager/2.0/%s/',''), - 'monitor': ('/operating-scylla/monitoring/%s/',''), - 'monitor_lst': ('/operating-scylla/monitoring/3.1/%s/','') -} - # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. @@ -180,94 +177,3 @@ class DitaaLexer(BashLexer): # Dictionary of values to pass into the template engine’s context for all pages html_context = {'html_baseurl': html_baseurl} - -# -- Options for not found extension ------------------------------------------- - -# Template used to render the 404.html generated by this extension. -notfound_template = '404.html' - -# Prefix added to all the URLs generated in the 404 page. 
-notfound_urls_prefix = '' - -# -- Options for redirect extension --------------------------------------- - -# Read a YAML dictionary of redirections and generate an HTML file for each -redirects_file = "_utils/redirections.yaml" - -# -- Options for multiversion extension ---------------------------------- - -# Whitelist pattern for tags (set to None to ignore all tags) -TAGS = [] -smv_tag_whitelist = multiversion_regex_builder(TAGS) -# Whitelist pattern for branches (set to None to ignore all branches) -BRANCHES = ['scylla-3.x', 'scylla-3.7.2.x', 'scylla-3.10.2.x'] -smv_branch_whitelist = multiversion_regex_builder(BRANCHES) -# Defines which version is considered to be the latest stable version. -# Must be listed in smv_tag_whitelist or smv_branch_whitelist. -smv_latest_version = 'scylla-3.10.2.x' -smv_rename_latest_version = 'stable' -# Whitelist pattern for remotes (set to None to use local branches only) -smv_remote_whitelist = r"^origin$" -# Pattern for released versions -smv_released_pattern = r'^tags/.*$' -# Format for versioned output directories inside the build directory -smv_outputdir_format = '{ref.name}' - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'ScyllaDocumentation.tex', u'Scylla Documentation Documentation', - u'Scylla Project Contributors', 'manual'), -] - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. 
List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'scylladocumentation', u'Scylla Documentation Documentation', - [author], 1) -] - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'ScyllaDocumentation', u'Scylla Documentation Documentation', - author, 'ScyllaDocumentation', 'One line description of project.', - 'Miscellaneous'), -] - -# -- Options for Epub output ---------------------------------------------- - -# Bibliographic Dublin Core info. -epub_title = project -epub_author = author -epub_publisher = author -epub_copyright = copyright - -# A list of files that should not be packed into the epub file. -epub_exclude_files = ['search.html'] diff --git a/driver-core/pom.xml b/driver-core/pom.xml index ed6d9f6fac8..5fe304881b3 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-core diff --git a/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java index e5fc175fe62..e2b7a805483 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BatchStatement.java @@ -220,6 +220,7 @@ public int requestSizeInBytes(ProtocolVersion protocolVersion, CodecRegistry cod case V3: case V4: case V5: + case V6: size += CBUtil.sizeOfConsistencyLevel(getConsistencyLevel()); size += QueryFlag.serializedSize(protocolVersion); // Serial CL and default timestamp also depend on session-level defaults (QueryOptions). 
diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index dad4ada8cb4..9317bd0a58b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -337,6 +337,7 @@ public int requestSizeInBytes(ProtocolVersion protocolVersion, CodecRegistry cod case V3: case V4: case V5: + case V6: size += CBUtil.sizeOfConsistencyLevel(getConsistencyLevel()); size += QueryFlag.serializedSize(protocolVersion); if (wrapper.values.length > 0) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/CloudConfigFactory.java b/driver-core/src/main/java/com/datastax/driver/core/CloudConfigFactory.java index 98faf16c956..f11b1fcecf4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CloudConfigFactory.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CloudConfigFactory.java @@ -228,8 +228,8 @@ protected InetSocketAddress getSniProxyAddress(JsonNode proxyMetadata) { throw new IllegalStateException( "Invalid proxy metadata: missing port from field sni_proxy_address"); } - return InetSocketAddress.createUnresolved( - sniProxyHostAndPort.getHostText(), sniProxyHostAndPort.getPort()); + String host = GuavaCompatibility.INSTANCE.getHost(sniProxyHostAndPort); + return InetSocketAddress.createUnresolved(host, sniProxyHostAndPort.getPort()); } else { throw new IllegalStateException("Invalid proxy metadata: missing field sni_proxy_address"); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 6c6b2c00552..cdac5cff4d0 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -133,6 +133,10 @@ public class Cluster implements Closeable { static final int NEW_NODE_DELAY_SECONDS = 
SystemProperties.getInt("com.datastax.driver.NEW_NODE_DELAY_SECONDS", 1); + // Used in integration tests to force the driver to negotiate the protocol + // version even if it was explicitly set. + @VisibleForTesting static boolean shouldAlwaysNegotiateProtocolVersion = false; + // Some per-JVM number that allows to generate unique cluster names when // multiple Cluster instance are created in the same JVM. private static final AtomicInteger CLUSTER_ID = new AtomicInteger(0); @@ -748,6 +752,10 @@ public static class Builder implements Initializer { private boolean allowBetaProtocolVersion = false; private boolean noCompact = false; private boolean isCloud = false; + private boolean useAdvancedShardAwareness = true; + private boolean schemaQueriesPaged = true; + private int localPortLow = ProtocolOptions.DEFAULT_LOCAL_PORT_LOW; + private int localPortHigh = ProtocolOptions.DEFAULT_LOCAL_PORT_HIGH; private Collection listeners; @@ -863,11 +871,11 @@ public Builder withMaxSchemaAgreementWaitSeconds(int maxSchemaAgreementWaitSecon * *

By default, the driver will "auto-detect" which protocol version it can use when * connecting to the first node. More precisely, it will try first with {@link - * ProtocolVersion#NEWEST_SUPPORTED}, and if not supported fallback to the highest version - * supported by the first node it connects to. Please note that once the version is - * "auto-detected", it won't change: if the first node the driver connects to is a Cassandra 1.2 - * node and auto-detection is used (the default), then the native protocol version 1 will be use - * for the lifetime of the Cluster instance. + * ProtocolVersion#DEFAULT}, and if not supported fallback to the highest version supported by + * the first node it connects to. Please note that once the version is "auto-detected", it won't + * change: if the first node the driver connects to is a Cassandra 1.2 node and auto-detection + * is used (the default), then the native protocol version 1 will be use for the lifetime of the + * Cluster instance. * *

By using {@link Builder#allowBetaProtocolVersion()}, it is possible to force driver to * connect to Cassandra node that supports the latest protocol beta version. Leaving this flag @@ -1473,6 +1481,57 @@ public Builder withCloudSecureConnectBundle(InputStream cloudConfigInputStream) return addCloudConfigToBuilder(cloudConfig); } + /** + * Disables advanced shard awareness. By default, this driver chooses local port while making a + * connection to node, to signal which shard it wants to connect to. This allows driver to + * estabilish connection pool faster, especially when there are multiple clients connecting + * concurrently. If this causes any issues, you can disable it using this method. The most + * common issues are the NAT between client and node (which messes up client port numbers) and + * shard aware port (default: 19042) blocked by firewall. + * + * @return this builder. + */ + public Builder withoutAdvancedShardAwareness() { + this.useAdvancedShardAwareness = false; + return this; + } + + /** + * Disables paging in schema queries. By default, Queries that fetch schema from the cluster are + * paged. This option causes the least impact on the cluster latencies when a new client + * connects. Turning off paging may result in faster driver initialisation at the expense of + * higher cluster latencies. + * + * @return this builder. + */ + public Builder withoutPagingInSchemaQueries() { + this.schemaQueriesPaged = false; + return this; + } + + /** + * Sets local port range for use by advanced shard awareness. Driver will use ports from this + * range as local ports when connecting to cluster. If {@link #withoutAdvancedShardAwareness()} + * was called, then setting this range does not affect anything. + * + * @param low Lower bound of range, inclusive. + * @param high Upper bound of range, inclusive. + * @return this builder. 
+ */ + public Builder withLocalPortRange(int low, int high) { + if (low < 1 || 65535 < low || high < 1 || 65535 < high) { + throw new IllegalArgumentException("Port numbers must be between 1 and 65535"); + } + + if (high - low < 1000) { + throw new IllegalArgumentException("Port range should be sufficiently large"); + } + + this.localPortLow = low; + this.localPortHigh = high; + return this; + } + private Builder addCloudConfigToBuilder(CloudConfig cloudConfig) { Builder builder = withEndPointFactory(new SniEndPointFactory(cloudConfig.getProxyAddress())) @@ -1519,15 +1578,26 @@ public Configuration getConfiguration() { maxSchemaAgreementWaitSeconds, sslOptions, authProvider, - noCompact) + noCompact, + useAdvancedShardAwareness, + localPortLow, + localPortHigh) .setCompression(compression); MetricsOptions metricsOptions = new MetricsOptions(metricsEnabled, jmxEnabled); + QueryOptions queryOptions = configurationBuilder.getQueryOptions(); + if (queryOptions == null) { + queryOptions = new QueryOptions(); + } + + queryOptions.setSchemaQueriesPaged(schemaQueriesPaged); + return configurationBuilder .withProtocolOptions(protocolOptions) .withMetricsOptions(metricsOptions) .withPolicies(policiesBuilder.build()) + .withQueryOptions(queryOptions) .build(); } @@ -1843,7 +1913,9 @@ long delayMs() { } private void negotiateProtocolVersionAndConnect() { - boolean shouldNegotiate = (configuration.getProtocolOptions().initialProtocolVersion == null); + boolean shouldNegotiate = + (configuration.getProtocolOptions().initialProtocolVersion == null + || shouldAlwaysNegotiateProtocolVersion); while (true) { try { controlConnection.connect(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/CodecUtils.java b/driver-core/src/main/java/com/datastax/driver/core/CodecUtils.java index 94295a0bb14..afaa7176bca 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CodecUtils.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CodecUtils.java @@ -64,6 
+64,7 @@ public static int readSize(ByteBuffer input, ProtocolVersion version) { case V3: case V4: case V5: + case V6: return input.getInt(); default: throw version.unsupported(); @@ -92,6 +93,7 @@ public static void writeSize(ByteBuffer output, int size, ProtocolVersion versio case V3: case V4: case V5: + case V6: output.putInt(size); break; default: @@ -131,6 +133,7 @@ public static void writeValue(ByteBuffer output, ByteBuffer value, ProtocolVersi case V3: case V4: case V5: + case V6: if (value == null) { output.putInt(-1); } else { @@ -217,6 +220,7 @@ private static int sizeOfCollectionSize(ProtocolVersion version) { case V3: case V4: case V5: + case V6: return 4; default: throw version.unsupported(); @@ -237,6 +241,7 @@ private static int sizeOfValue(ByteBuffer value, ProtocolVersion version) { case V3: case V4: case V5: + case V6: return value == null ? 4 : 4 + value.remaining(); default: throw version.unsupported(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Configuration.java b/driver-core/src/main/java/com/datastax/driver/core/Configuration.java index 3ef6922df1b..714d173bb81 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Configuration.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Configuration.java @@ -348,6 +348,10 @@ public Builder withDefaultKeyspace(String keyspace) { return this; } + public QueryOptions getQueryOptions() { + return queryOptions; + } + /** * Builds the final object from this builder. 
* diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 5b0fb2cac3c..346b63c32ab 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -72,7 +72,13 @@ import io.netty.util.Timer; import io.netty.util.TimerTask; import io.netty.util.concurrent.GlobalEventExecutor; +import java.io.IOException; import java.lang.ref.WeakReference; +import java.net.BindException; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.security.InvalidParameterException; +import java.text.MessageFormat; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -106,6 +112,10 @@ class Connection { private static final int FLUSHER_SCHEDULE_PERIOD_NS = SystemProperties.getInt("com.datastax.driver.FLUSHER_SCHEDULE_PERIOD_NS", 10000); + private static final long ADV_SHARD_AWARENESS_BLOCK_ON_NAT = 1000000L * 60L * 1000L; + + private static final long ADV_SHARD_AWARENESS_BLOCK_ON_ERROR = 5 * 60 * 1000; + enum State { OPEN, TRASHED, @@ -121,6 +131,8 @@ enum State { private final String name; private volatile Integer shardId = null; + private int requestedShardId = -1; + @VisibleForTesting volatile Channel channel; private final Factory factory; @@ -169,44 +181,111 @@ protected Connection(String name, EndPoint endPoint, Factory factory, Owner owne } ListenableFuture initAsync() { + return initAsync(-1, 0); + } + + ListenableFuture initAsync(final int shardId, int serverPort) { if (factory.isShutdown) return Futures.immediateFailedFuture( new ConnectionException(endPoint, "Connection factory is shut down")); - ProtocolVersion protocolVersion = - factory.protocolVersion == null - ? 
ProtocolVersion.NEWEST_SUPPORTED - : factory.protocolVersion; + this.requestedShardId = shardId; + + final ProtocolVersion protocolVersion = + factory.protocolVersion == null ? ProtocolVersion.DEFAULT : factory.protocolVersion; final SettableFuture channelReadyFuture = SettableFuture.create(); try { - Bootstrap bootstrap = factory.newBootstrap(); - ProtocolOptions protocolOptions = factory.configuration.getProtocolOptions(); - bootstrap.handler( - new Initializer( - this, - protocolVersion, - protocolOptions.getCompression().compressor(), - protocolOptions.getSSLOptions(), - factory.configuration.getPoolingOptions().getHeartbeatIntervalSeconds(), - factory.configuration.getNettyOptions(), - factory.configuration.getCodecRegistry(), - factory.configuration.getMetricsOptions().isEnabled() - ? factory.manager.metrics - : null)); - - ChannelFuture future = bootstrap.connect(endPoint.resolve()); + final ProtocolOptions protocolOptions = factory.configuration.getProtocolOptions(); + final Bootstrap bootstrap = factory.newBootstrap(); + prepareBootstrap(bootstrap, protocolVersion, protocolOptions); + + final InetSocketAddress serverAddress = + (serverPort == 0) + ? endPoint.resolve() + : new InetSocketAddress(endPoint.resolve().getAddress(), serverPort); + + final Owner owner = ownerRef.get(); + final HostConnectionPool pool = + owner instanceof HostConnectionPool ? (HostConnectionPool) owner : null; + final ShardingInfo shardingInfo = pool == null ? 
null : pool.host.getShardingInfo(); + if ((shardingInfo == null) && shardId != -1) { + throw new InvalidParameterException( + MessageFormat.format( + "Requested connection to shard {0} of host {1}:{2}, but sharding info or pool is absent", + shardId, serverAddress.getAddress().getHostAddress(), serverPort)); + } - writer.incrementAndGet(); - future.addListener( + ChannelFuture future; + final int lowPort, highPort; + if (pool != null) { + lowPort = pool.manager.configuration().getProtocolOptions().getLowLocalPort(); + highPort = pool.manager.configuration().getProtocolOptions().getHighLocalPort(); + } else { + lowPort = highPort = -1; + } + + if (shardId == -1) { + future = bootstrap.connect(serverAddress); + } else { + int localPort = + PortAllocator.getNextAvailablePort( + shardingInfo.getShardsCount(), shardId, lowPort, highPort); + if (localPort == -1) { + throw new RuntimeException("Can't find free local port to use"); + } + + future = bootstrap.connect(serverAddress, new InetSocketAddress(localPort)); + logger.debug( + "Connecting to shard {} using local port {} (shardCount: {})\n", + shardId, + localPort, + shardingInfo.getShardsCount()); + } + + final ChannelFutureListener channelListener = new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) throws Exception { - writer.decrementAndGet(); if (future.cause() != null) { + // Local port busy, let's try another one + if (shardId != -1 && future.cause().getCause() instanceof BindException) { + int localPort = + PortAllocator.getNextAvailablePort( + shardingInfo.getShardsCount(), shardId, lowPort, highPort); + if (localPort != -1) { + if (future.channel() != null) { + future + .channel() + .close() + .addListener( + new ChannelFutureListener() { + @Override + public void operationComplete(ChannelFuture future) + throws Exception { + if (future.cause() != null) { + logger.warn("Error while closing old channel", future.cause()); + } + } + }); + } + prepareBootstrap(bootstrap, 
protocolVersion, protocolOptions); + ChannelFuture newFuture = + bootstrap.connect(serverAddress, new InetSocketAddress(localPort)); + newFuture.addListener(this); + logger.debug( + "Retrying connecting to shard {} using local port {} (shardCount: {})\n", + shardId, + localPort, + shardingInfo.getShardsCount()); + return; + } + } logger.warn("Error creating netty channel to " + endPoint, future.cause()); } + writer.decrementAndGet(); + // Note: future.channel() can be null in some error cases, so we need to guard against // it in the rest of the code below. channel = future.channel(); @@ -228,16 +307,22 @@ public void operationComplete(ChannelFuture future) throws Exception { Connection.this.factory.allChannels.add(channel); } if (!future.isSuccess()) { - if (logger.isDebugEnabled()) + if (logger.isDebugEnabled()) { logger.debug( String.format( "%s Error connecting to %s%s", Connection.this, Connection.this.endPoint, extractMessage(future.cause()))); + } channelReadyFuture.setException( new TransportException( Connection.this.endPoint, "Cannot connect", future.cause())); + if (shardId != -1) { + // We are using advanced shard awareness, so pool must be non-null. 
+ pool.tempBlockAdvShardAwareness(ADV_SHARD_AWARENESS_BLOCK_ON_ERROR); + } + } else { assert channel != null; logger.debug( @@ -247,7 +332,10 @@ public void operationComplete(ChannelFuture future) throws Exception { } } } - }); + }; + + writer.incrementAndGet(); + future.addListener(channelListener); } catch (RuntimeException e) { closeAsync().force(); throw e; @@ -314,6 +402,23 @@ public void onFailure(Throwable t) { return initFuture; } + private Bootstrap prepareBootstrap( + Bootstrap bootstrap, ProtocolVersion protocolVersion, ProtocolOptions protocolOptions) { + bootstrap.handler( + new Initializer( + this, + protocolVersion, + protocolOptions.getCompression().compressor(), + protocolOptions.getSSLOptions(), + factory.configuration.getPoolingOptions().getHeartbeatIntervalSeconds(), + factory.configuration.getNettyOptions(), + factory.configuration.getCodecRegistry(), + factory.configuration.getMetricsOptions().isEnabled() + ? factory.manager.metrics + : null)); + return bootstrap; + } + private static String extractMessage(Throwable t) { if (t == null) return ""; String msg = t.getMessage() == null || t.getMessage().isEmpty() ? 
t.toString() : t.getMessage(); @@ -333,12 +438,13 @@ private AsyncFunction onChannelReady( public ListenableFuture apply(Void input) throws Exception { Future startupOptionsFuture = write(new Requests.Options()); return GuavaCompatibility.INSTANCE.transformAsync( - startupOptionsFuture, onOptionsResponse(initExecutor), initExecutor); + startupOptionsFuture, onOptionsResponse(protocolVersion, initExecutor), initExecutor); } }; } - private AsyncFunction onOptionsResponse(final Executor initExecutor) { + private AsyncFunction onOptionsResponse( + final ProtocolVersion protocolVersion, final Executor initExecutor) { return new AsyncFunction() { @Override public ListenableFuture apply(Message.Response response) throws Exception { @@ -350,6 +456,16 @@ public ListenableFuture apply(Message.Response response) throws Exception if (sharding != null) { getHost().setShardingInfo(sharding.shardingInfo); Connection.this.shardId = sharding.shardId; + if (Connection.this.requestedShardId != -1 + && Connection.this.requestedShardId != sharding.shardId) { + logger.warn( + "Advanced shard awareness: requested connection to shard {}, but connected to {}. Is there a NAT between client and server?", + Connection.this.requestedShardId, + sharding.shardId); + // Owner is a HostConnectionPool if we are using adv. 
shard awareness + ((HostConnectionPool) Connection.this.ownerRef.get()) + .tempBlockAdvShardAwareness(ADV_SHARD_AWARENESS_BLOCK_ON_NAT); + } } else { getHost().setShardingInfo(null); Connection.this.shardId = 0; @@ -361,6 +477,9 @@ public ListenableFuture apply(Message.Response response) throws Exception return MoreFutures.VOID_SUCCESS; case ERROR: Responses.Error error = (Responses.Error) response; + if (isUnsupportedProtocolVersion(error)) + throw unsupportedProtocolVersionException( + protocolVersion, error.serverProtocolVersion); throw new TransportException( endPoint, String.format( @@ -425,11 +544,6 @@ private AsyncFunction onStartupResponse( return new AsyncFunction() { @Override public ListenableFuture apply(Message.Response response) throws Exception { - - if (protocolVersion.compareTo(ProtocolVersion.V5) >= 0 && response.type != ERROR) { - switchToV5Framing(); - } - switch (response.type) { case READY: return checkClusterName(protocolVersion, initExecutor); @@ -468,6 +582,7 @@ public ListenableFuture apply(Message.Response response) throws Exception case V3: case V4: case V5: + case V6: return authenticateV2(authenticator, protocolVersion, initExecutor); default: throw defunct(protocolVersion.unsupported()); @@ -650,7 +765,9 @@ private boolean isUnsupportedProtocolVersion(Responses.Error error) { // Testing for a specific string is a tad fragile but well, we don't have much choice // C* 2.1 reports a server error instead of protocol error, see CASSANDRA-9451 return (error.code == ExceptionCode.PROTOCOL_ERROR || error.code == ExceptionCode.SERVER_ERROR) - && error.message.contains("Invalid or unsupported protocol version"); + && (error.message.contains("Invalid or unsupported protocol version") + // JAVA-2924: server is behind driver and considers the proposed version as beta + || error.message.contains("Beta version of the protocol used")); } private UnsupportedProtocolVersionException unsupportedProtocolVersionException( @@ -1039,6 +1156,72 @@ public 
String toString() { "Connection[%s, inFlight=%d, closed=%b]", name, inFlight.get(), isClosed()); } + static class PortAllocator { + private static final AtomicInteger lastPort = new AtomicInteger(-1); + + public static int getNextAvailablePort(int shardCount, int shardId, int lowPort, int highPort) { + int lastPortValue, foundPort = -1; + do { + lastPortValue = lastPort.get(); + + // We will scan from lastPortValue + // (or lowPort is there was no lastPort or lastPort is too low) + int scanStart = lastPortValue == -1 ? lowPort : lastPortValue; + if (scanStart < lowPort) { + scanStart = lowPort; + } + + // Round it up to "% shardCount == shardId" + scanStart += (shardCount - scanStart % shardCount) + shardId; + + // Scan from scanStart upwards to highPort. + for (int port = scanStart; port <= highPort; port += shardCount) { + if (isTcpPortAvailable(port)) { + foundPort = port; + break; + } + } + + // If we started scanning from a high scanStart port + // there might have been not enough ports left that are + // smaller than highPort. Scan from the beginning + // from the lowPort. + if (foundPort == -1) { + scanStart = lowPort + (shardCount - lowPort % shardCount) + shardId; + + for (int port = scanStart; port <= highPort; port += shardCount) { + if (isTcpPortAvailable(port)) { + foundPort = port; + break; + } + } + } + + // No luck! All ports taken! 
+ if (foundPort == -1) { + return -1; + } + } while (!lastPort.compareAndSet(lastPortValue, foundPort)); + + return foundPort; + } + + public static boolean isTcpPortAvailable(int port) { + try { + ServerSocket serverSocket = new ServerSocket(); + try { + serverSocket.setReuseAddress(false); + serverSocket.bind(new InetSocketAddress(port), 1); + return true; + } finally { + serverSocket.close(); + } + } catch (IOException ex) { + return false; + } + } + } + static class Factory { final Timer timer; @@ -1116,11 +1299,17 @@ Connection open(Host host) Connection open(HostConnectionPool pool) throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, ClusterNameMismatchException { + return open(pool, -1, 0); + } + + Connection open(HostConnectionPool pool, int shardId, int serverPort) + throws ConnectionException, InterruptedException, UnsupportedProtocolVersionException, + ClusterNameMismatchException { pool.host.convictionPolicy.signalConnectionsOpening(1); Connection connection = new Connection(buildConnectionName(pool.host), pool.host.getEndPoint(), this, pool); try { - connection.initAsync().get(); + connection.initAsync(shardId, serverPort).get(); return connection; } catch (ExecutionException e) { throw launderAsyncInitException(e); @@ -1285,6 +1474,7 @@ private void flush(FlushItem item) { flusher.start(); } + @ChannelHandler.Sharable class Dispatcher extends SimpleChannelInboundHandler { final StreamIdGenerator streamIdHandler; @@ -1756,6 +1946,8 @@ private static class Initializer extends ChannelInitializer { new Message.ProtocolEncoder(ProtocolVersion.V4); private static final Message.ProtocolEncoder messageEncoderV5 = new Message.ProtocolEncoder(ProtocolVersion.V5); + private static final Message.ProtocolEncoder messageEncoderV6 = + new Message.ProtocolEncoder(ProtocolVersion.V6); private static final Frame.Encoder frameEncoder = new Frame.Encoder(); private final ProtocolVersion protocolVersion; @@ -1822,6 +2014,8 @@ protected 
void initChannel(SocketChannel channel) throws Exception { pipeline.addLast("frameDecoder", new Frame.Decoder()); pipeline.addLast("frameEncoder", frameEncoder); + pipeline.addLast("framingFormatHandler", new FramingFormatHandler(connection.factory)); + if (compressor != null // Frame-level compression is only done in legacy protocol versions. In V5 and above, it // happens at a higher level ("segment" that groups multiple frames), so never install @@ -1853,45 +2047,14 @@ private Message.ProtocolEncoder messageEncoderFor(ProtocolVersion version) { return messageEncoderV4; case V5: return messageEncoderV5; + case V6: + return messageEncoderV6; default: throw new DriverInternalError("Unsupported protocol version " + protocolVersion); } } } - /** - * Rearranges the pipeline to deal with the new framing structure in protocol v5 and above. This - * has to be done manually, because it only happens once we've confirmed that the server supports - * v5. - */ - void switchToV5Framing() { - assert factory.protocolVersion.compareTo(ProtocolVersion.V5) >= 0; - - // We want to do this on the event loop, to make sure it doesn't race with incoming requests - assert channel.eventLoop().inEventLoop(); - - ChannelPipeline pipeline = channel.pipeline(); - SegmentCodec segmentCodec = - new SegmentCodec( - channel.alloc(), factory.configuration.getProtocolOptions().getCompression()); - - // Outbound: "message -> segment -> bytes" instead of "message -> frame -> bytes" - Message.ProtocolEncoder requestEncoder = - (Message.ProtocolEncoder) pipeline.get("messageEncoder"); - pipeline.replace( - "messageEncoder", - "messageToSegmentEncoder", - new MessageToSegmentEncoder(channel.alloc(), requestEncoder)); - pipeline.replace( - "frameEncoder", "segmentToBytesEncoder", new SegmentToBytesEncoder(segmentCodec)); - - // Inbound: "frame <- segment <- bytes" instead of "frame <- bytes" - pipeline.replace( - "frameDecoder", "bytesToSegmentDecoder", new BytesToSegmentDecoder(segmentCodec)); - 
pipeline.addAfter( - "bytesToSegmentDecoder", "segmentToFrameDecoder", new SegmentToFrameDecoder()); - } - /** A component that "owns" a connection, and should be notified when it dies. */ interface Owner { void onConnectionDefunct(Connection connection); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 3558f006d9d..da52ffaba4a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -301,7 +301,7 @@ private Connection tryConnect(Host host, boolean isInitialConnection) // If no protocol version was specified, set the default as soon as a connection succeeds (it's // needed to parse UDTs in refreshSchema) if (cluster.connectionFactory.protocolVersion == null) - cluster.connectionFactory.protocolVersion = ProtocolVersion.NEWEST_SUPPORTED; + cluster.connectionFactory.protocolVersion = ProtocolVersion.DEFAULT; try { logger.trace("[Control connection] Registering for events"); @@ -611,6 +611,17 @@ private static void updateInfo( InetAddress nativeAddress = row.getInet("native_address"); int nativePort = row.getInt("native_port"); broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); + } else if (row.getColumnDefinitions().contains("native_transport_address")) { + // DSE 6.8 introduced native_transport_address and native_transport_port for the + // listen address. Also included is native_transport_port_ssl (in case users + // want to setup a different port for SSL and non-SSL conns). 
+ InetAddress nativeAddress = row.getInet("native_transport_address"); + int nativePort = row.getInt("native_transport_port"); + if (cluster.getCluster().getConfiguration().getProtocolOptions().getSSLOptions() != null + && !row.isNull("native_transport_port_ssl")) { + nativePort = row.getInt("native_transport_port_ssl"); + } + broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); } else if (row.getColumnDefinitions().contains("rpc_address")) { InetAddress rpcAddress = row.getInet("rpc_address"); broadcastRpcAddress = new InetSocketAddress(rpcAddress, cluster.connectionFactory.getPort()); @@ -831,6 +842,14 @@ private void refreshNodeListAndTokenMap( InetAddress nativeAddress = row.getInet("native_address"); int nativePort = row.getInt("native_port"); broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); + } else if (row.getColumnDefinitions().contains("native_transport_address")) { + InetAddress nativeAddress = row.getInet("native_transport_address"); + int nativePort = row.getInt("native_transport_port"); + if (cluster.getCluster().getConfiguration().getProtocolOptions().getSSLOptions() != null + && !row.isNull("native_transport_port_ssl")) { + nativePort = row.getInt("native_transport_port_ssl"); + } + broadcastRpcAddress = new InetSocketAddress(nativeAddress, nativePort); } else { InetAddress rpcAddress = row.getInet("rpc_address"); broadcastRpcAddress = @@ -956,7 +975,11 @@ private boolean isValidPeer(Row peerRow, boolean logIfInvalid) { && !peerRow.isNull("native_port"); } else { isValid &= - peerRow.getColumnDefinitions().contains("rpc_address") && !peerRow.isNull("rpc_address"); + (peerRow.getColumnDefinitions().contains("rpc_address") && !peerRow.isNull("rpc_address")) + || (peerRow.getColumnDefinitions().contains("native_transport_address") + && peerRow.getColumnDefinitions().contains("native_transport_port") + && !peerRow.isNull("native_transport_address") + && !peerRow.isNull("native_transport_port")); } if 
(EXTENDED_PEER_CHECK) { @@ -984,6 +1007,9 @@ private String formatInvalidPeer(Row peerRow) { formatMissingOrNullColumn(peerRow, "native_address", sb); formatMissingOrNullColumn(peerRow, "native_port", sb); } else { + formatMissingOrNullColumn(peerRow, "native_transport_address", sb); + formatMissingOrNullColumn(peerRow, "native_transport_port", sb); + formatMissingOrNullColumn(peerRow, "native_transport_port_ssl", sb); formatMissingOrNullColumn(peerRow, "rpc_address", sb); } if (EXTENDED_PEER_CHECK) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataTypeClassNameParser.java b/driver-core/src/main/java/com/datastax/driver/core/DataTypeClassNameParser.java index ef840356f3c..b4617b14f4e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataTypeClassNameParser.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataTypeClassNameParser.java @@ -123,7 +123,7 @@ static DataType parseOne( TypeCodec.varchar() .deserialize(Bytes.fromHexString("0x" + parser.readOne()), protocolVersion); parser.skipBlankAndComma(); - Map rawFields = parser.getNameAndTypeParameters(); + Map rawFields = parser.getNameAndTypeParameters(protocolVersion); List fields = new ArrayList(rawFields.size()); for (Map.Entry entry : rawFields.entrySet()) fields.add( @@ -199,7 +199,7 @@ static ParseResult parseWithComposite( count--; Parser collectionParser = new Parser(last, 0); collectionParser.parseNextName(); // skips columnToCollectionType - Map params = collectionParser.getCollectionsParameters(); + Map params = collectionParser.getCollectionsParameters(protocolVersion); for (Map.Entry entry : params.entrySet()) collections.put(entry.getKey(), parseOne(entry.getValue(), protocolVersion, codecRegistry)); } @@ -320,18 +320,18 @@ public List getTypeParameters() { "Syntax error parsing '%s' at char %d: unexpected end of string", str, idx)); } - public Map getCollectionsParameters() { + public Map getCollectionsParameters(ProtocolVersion protocolVersion) { if 
(isEOS()) return Collections.emptyMap(); if (str.charAt(idx) != '(') throw new IllegalStateException(); ++idx; // skipping '(' - return getNameAndTypeParameters(); + return getNameAndTypeParameters(protocolVersion); } // Must be at the start of the first parameter to read - public Map getNameAndTypeParameters() { + public Map getNameAndTypeParameters(ProtocolVersion protocolVersion) { // The order of the hashmap matters for UDT Map map = new LinkedHashMap(); @@ -345,8 +345,7 @@ public Map getNameAndTypeParameters() { String name = null; try { name = - TypeCodec.varchar() - .deserialize(Bytes.fromHexString("0x" + bbHex), ProtocolVersion.NEWEST_SUPPORTED); + TypeCodec.varchar().deserialize(Bytes.fromHexString("0x" + bbHex), protocolVersion); } catch (NumberFormatException e) { throwSyntaxError(e.getMessage()); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/DefaultEndPointFactory.java b/driver-core/src/main/java/com/datastax/driver/core/DefaultEndPointFactory.java index c1378abf27d..b9caa3ea1a4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DefaultEndPointFactory.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DefaultEndPointFactory.java @@ -49,6 +49,16 @@ public EndPoint create(Row peersRow) { InetSocketAddress translateAddress = cluster.manager.translateAddress(new InetSocketAddress(nativeAddress, nativePort)); return new TranslatedAddressEndPoint(translateAddress); + } else if (peersRow.getColumnDefinitions().contains("native_transport_address")) { + InetAddress nativeAddress = peersRow.getInet("native_transport_address"); + int nativePort = peersRow.getInt("native_transport_port"); + if (cluster.getConfiguration().getProtocolOptions().getSSLOptions() != null + && !peersRow.isNull("native_transport_port_ssl")) { + nativePort = peersRow.getInt("native_transport_port_ssl"); + } + InetSocketAddress translateAddress = + cluster.manager.translateAddress(new InetSocketAddress(nativeAddress, nativePort)); + return new 
TranslatedAddressEndPoint(translateAddress); } else { InetAddress broadcastAddress = peersRow.getInet("peer"); InetAddress rpcAddress = peersRow.getInet("rpc_address"); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java index b6c6d0af50f..4a745da5ff0 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ExceptionCode.java @@ -36,6 +36,8 @@ enum ExceptionCode { READ_FAILURE(0x1300), FUNCTION_FAILURE(0x1400), WRITE_FAILURE(0x1500), + CDC_WRITE_FAILURE(0x1600), + CAS_WRITE_UNKNOWN(0x1700), // 2xx: problem validating the request SYNTAX_ERROR(0x2000), diff --git a/driver-core/src/main/java/com/datastax/driver/core/Frame.java b/driver-core/src/main/java/com/datastax/driver/core/Frame.java index 60b8846e551..ac5ee54727f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Frame.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Frame.java @@ -112,6 +112,7 @@ private static int readStreamId(ByteBuf fullFrame, ProtocolVersion version) { case V3: case V4: case V5: + case V6: return fullFrame.readShort(); default: throw version.unsupported(); @@ -162,6 +163,7 @@ static int lengthFor(ProtocolVersion version) { case V3: case V4: case V5: + case V6: return 9; default: throw version.unsupported(); @@ -180,6 +182,7 @@ public void encodeInto(ByteBuf destination) { case V3: case V4: case V5: + case V6: destination.writeShort(streamId); break; default: diff --git a/driver-core/src/main/java/com/datastax/driver/core/FramingFormatHandler.java b/driver-core/src/main/java/com/datastax/driver/core/FramingFormatHandler.java new file mode 100644 index 00000000000..91459a1ab34 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/FramingFormatHandler.java @@ -0,0 +1,78 @@ +/* + * Copyright DataStax, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core; + +import com.datastax.driver.core.Message.Response.Type; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPipeline; +import io.netty.handler.codec.MessageToMessageDecoder; +import java.util.List; + +/** + * A handler to deal with different protocol framing formats. + * + *

This handler detects when a handshake is successful; then, if necessary, adapts the pipeline + * to the modern framing format introduced in protocol v5. + */ +public class FramingFormatHandler extends MessageToMessageDecoder { + + private final Connection.Factory factory; + + FramingFormatHandler(Connection.Factory factory) { + this.factory = factory; + } + + @Override + protected void decode(ChannelHandlerContext ctx, Frame frame, List out) throws Exception { + boolean handshakeSuccessful = + frame.header.opcode == Type.READY.opcode || frame.header.opcode == Type.AUTHENTICATE.opcode; + if (handshakeSuccessful) { + // By default, the pipeline is configured for legacy framing since this is the format used + // by all protocol versions until handshake; after handshake however, we need to switch to + // modern framing for protocol v5 and higher. + if (frame.header.version.compareTo(ProtocolVersion.V5) >= 0) { + switchToModernFraming(ctx); + } + // once the handshake is successful, the framing format cannot change anymore; + // we can safely remove ourselves from the pipeline. 
+ ctx.pipeline().remove("framingFormatHandler"); + } + out.add(frame); + } + + private void switchToModernFraming(ChannelHandlerContext ctx) { + ChannelPipeline pipeline = ctx.pipeline(); + SegmentCodec segmentCodec = + new SegmentCodec( + ctx.channel().alloc(), factory.configuration.getProtocolOptions().getCompression()); + + // Outbound: "message -> segment -> bytes" instead of "message -> frame -> bytes" + Message.ProtocolEncoder requestEncoder = + (Message.ProtocolEncoder) pipeline.get("messageEncoder"); + pipeline.replace( + "messageEncoder", + "messageToSegmentEncoder", + new MessageToSegmentEncoder(ctx.channel().alloc(), requestEncoder)); + pipeline.replace( + "frameEncoder", "segmentToBytesEncoder", new SegmentToBytesEncoder(segmentCodec)); + + // Inbound: "frame <- segment <- bytes" instead of "frame <- bytes" + pipeline.replace( + "frameDecoder", "bytesToSegmentDecoder", new BytesToSegmentDecoder(segmentCodec)); + pipeline.addAfter( + "bytesToSegmentDecoder", "segmentToFrameDecoder", new SegmentToFrameDecoder()); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/GuavaCompatibility.java b/driver-core/src/main/java/com/datastax/driver/core/GuavaCompatibility.java index a0e7fe8ec55..069f550b049 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/GuavaCompatibility.java +++ b/driver-core/src/main/java/com/datastax/driver/core/GuavaCompatibility.java @@ -19,6 +19,7 @@ import com.google.common.base.Function; import com.google.common.collect.BiMap; import com.google.common.collect.Maps; +import com.google.common.net.HostAndPort; import com.google.common.reflect.TypeToken; import com.google.common.util.concurrent.AsyncFunction; import com.google.common.util.concurrent.FutureCallback; @@ -180,6 +181,24 @@ public abstract ListenableFuture transformAsync( */ public abstract Executor sameThreadExecutor(); + /** + * Returns the portion of the given {@link HostAndPort} instance that should represent the + * hostname or IPv4/IPv6 
literal. + * + *

The method {@code HostAndPort.getHostText} has been replaced with {@code + * HostAndPort.getHost} starting with Guava 20.0; it has been completely removed in Guava 22.0. + */ + @SuppressWarnings("JavaReflectionMemberAccess") + public String getHost(HostAndPort hostAndPort) { + try { + // Guava >= 20.0 + return (String) HostAndPort.class.getMethod("getHost").invoke(hostAndPort); + } catch (Exception e) { + // Guava < 22.0 + return hostAndPort.getHostText(); + } + } + private static GuavaCompatibility selectImplementation() { if (isGuava_19_0_OrHigher()) { logger.info("Detected Guava >= 19 in the classpath, using modern compatibility layer"); diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java index 67ca642eb31..d967bc937d2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -41,6 +41,7 @@ import com.google.common.util.concurrent.SettableFuture; import com.google.common.util.concurrent.Uninterruptibles; import io.netty.util.concurrent.EventExecutor; +import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; @@ -96,6 +97,8 @@ class HostConnectionPool implements Connection.Owner { private final AtomicReference closeFuture = new AtomicReference(); + private long advShardAwarenessBlockedUntil = 0; + private enum Phase { INITIALIZING, READY, @@ -110,15 +113,10 @@ public static class ConnectionTasksSharedState { private int tasksInFlight = 0; private Map connectionsToClose = new HashMap(); - public Connection registerTask(int shardId) { - Connection c = null; + public void registerTask() { synchronized (lock) { - c = connectionsToClose.remove(shardId); - if (c == null) { - ++tasksInFlight; - } + ++tasksInFlight; } - return c; } public void unregisterTask() { @@ -137,6 +135,12 @@ 
public void unregisterTask() { } } + public Connection getConnection(int shardId) { + synchronized (lock) { + return connectionsToClose.remove(shardId); + } + } + public Connection addConnectionToClose(int shardId, Connection c) { Connection res = null; boolean close = false; @@ -154,21 +158,96 @@ public Connection addConnectionToClose(int shardId, Connection c) { } } + private boolean canUseAdvancedShardAwareness(boolean logResult) { + ShardingInfo shardingInfo = host.getShardingInfo(); + if (shardingInfo == null) { + if (logResult) { + logger.warn( + "Not using advanced port-based shard awareness with {} because sharding info is missing", + host); + } + return false; + } + if (!manager.configuration().getProtocolOptions().isUseAdvancedShardAwareness()) { + if (logResult) { + logger.warn( + "Not using advanced port-based shard awareness with {} because it's disabled in configuration", + host); + } + return false; + } + + boolean isSSLUsed = null != manager.configuration().getProtocolOptions().getSSLOptions(); + if (shardingInfo.getShardAwarePort(isSSLUsed) == 0) { + if (logResult) { + logger.warn( + "Not using advanced port-based shard awareness with {} because we're missing port-based shard awareness port on the server", + host); + } + return false; + } + + if (System.currentTimeMillis() < advShardAwarenessBlockedUntil) { + if (logResult) { + logger.warn( + "Not using advanced port-based shard awareness with {} because of a previous error", + host); + } + return false; + } + + if (logResult) { + logger.info("Using advanced port-based shard awareness with {}", host); + } + return true; + } + + public void tempBlockAdvShardAwareness(long millis) { + advShardAwarenessBlockedUntil = + Math.max(System.currentTimeMillis() + millis, advShardAwarenessBlockedUntil); + } + private final ConnectionTasksSharedState connectionTasksSharedState = new ConnectionTasksSharedState(); + private void scheduleConnectionTask(final ConnectionTask task) { + timeoutsExecutor.schedule( + 
new Runnable() { + public void run() { + manager.blockingExecutor().submit(task); + } + }, + 100, + TimeUnit.MILLISECONDS); + } + + private enum ConnectionResult { + SUCCESS, + SHOULD_RETRY, + FAILED, + } + private class ConnectionTask implements Runnable { private final int shardId; public ConnectionTask(int shardId) { this.shardId = shardId; + connectionTasksSharedState.registerTask(); } @Override public void run() { - addConnectionIfUnderMaximum(shardId, connectionTasksSharedState); - scheduledForCreation[shardId].decrementAndGet(); + switch (addConnectionIfUnderMaximum(shardId, connectionTasksSharedState)) { + case SUCCESS: + case FAILED: + connectionTasksSharedState.unregisterTask(); + scheduledForCreation[shardId].decrementAndGet(); + break; + case SHOULD_RETRY: + scheduleConnectionTask(this); + break; + } } } @@ -234,7 +313,7 @@ ListenableFuture initAsyncWithConnection(Connection reusedConnection) { pendingBorrows[i] = new ConcurrentLinkedQueue(); } - final List connections = Lists.newArrayListWithCapacity(2 * toCreate); + final List connections = Lists.newArrayListWithCapacity(toCreate); final List> connectionFutures = Lists.newArrayListWithCapacity(2 * toCreate); @@ -242,12 +321,39 @@ ListenableFuture initAsyncWithConnection(Connection reusedConnection) { connections.add(reusedConnection); connectionFutures.add(MoreFutures.VOID_SUCCESS); - List newConnections = - manager.connectionFactory().newConnections(this, 2 * toCreate); + List newConnections = manager.connectionFactory().newConnections(this, toCreate); connections.addAll(newConnections); - for (Connection connection : newConnections) { - ListenableFuture connectionFuture = connection.initAsync(); - connectionFutures.add(handleErrors(connectionFuture, initExecutor)); + + if (canUseAdvancedShardAwareness(true)) { + ShardingInfo shardingInfo = host.getShardingInfo(); + boolean isSSLUsed = null != manager.configuration().getProtocolOptions().getSSLOptions(); + int serverPort = 
shardingInfo.getShardAwarePort(isSSLUsed); + + int shardId = 0; + int shardConnectionIndex = 0; + for (Connection connection : newConnections) { + if (shardConnectionIndex == connectionsPerShard) { + shardConnectionIndex = 0; + shardId++; + } + if (shardId == reusedConnection.shardId() && shardConnectionIndex == 0) { + shardConnectionIndex++; + if (shardConnectionIndex == connectionsPerShard) { + shardConnectionIndex = 0; + shardId++; + } + } + + ListenableFuture connectionFuture = connection.initAsync(shardId, serverPort); + connectionFutures.add(handleErrors(connectionFuture, initExecutor)); + + shardConnectionIndex++; + } + } else { + for (Connection connection : newConnections) { + ListenableFuture connectionFuture = connection.initAsync(); + connectionFutures.add(handleErrors(connectionFuture, initExecutor)); + } } final SettableFuture initFuture = SettableFuture.create(); @@ -421,14 +527,12 @@ ListenableFuture borrowConnection( if (host.convictionPolicy.canReconnectNow()) { if (connectionsPerShard == 0) { maybeSpawnNewConnection(shardId); - return enqueue(timeout, unit, maxQueueSize, shardId); } else if (scheduledForCreation[shardId].compareAndSet(0, connectionsPerShard)) { for (int i = 0; i < connectionsPerShard; i++) { // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to // protect against creating connection in excess of core too quickly manager.blockingExecutor().submit(new ConnectionTask(shardId)); } - return enqueue(timeout, unit, maxQueueSize, shardId); } } // connections for this shard are still being initialized so pick connection for any shard @@ -650,19 +754,20 @@ private void doTrashConnection(Connection connection) { trash[connection.shardId()].add(connection); } - private boolean addConnectionIfUnderMaximum(int shardId, ConnectionTasksSharedState sharedState) { + private ConnectionResult addConnectionIfUnderMaximum( + int shardId, ConnectionTasksSharedState sharedState) { // First, make sure we don't cross the allowed limit of 
open connections for (; ; ) { int opened = open[shardId].get(); - if (opened >= maxConnectionsPerShard) return false; + if (opened >= maxConnectionsPerShard) return ConnectionResult.FAILED; if (open[shardId].compareAndSet(opened, opened + 1)) break; } if (phase.get() != Phase.READY) { open[shardId].decrementAndGet(); - return false; + return ConnectionResult.FAILED; } // Now really open the connection @@ -671,22 +776,36 @@ private boolean addConnectionIfUnderMaximum(int shardId, ConnectionTasksSharedSt if (newConnection == null) { if (!host.convictionPolicy.canReconnectNow()) { open[shardId].decrementAndGet(); - return false; + return ConnectionResult.SHOULD_RETRY; } - logger.debug("Creating new connection on busy pool to {}", host); - newConnection = sharedState.registerTask(shardId); + newConnection = sharedState.getConnection(shardId); if (newConnection == null) { - try { - do { - newConnection = manager.connectionFactory().open(this); - if (newConnection.shardId() == shardId) { - newConnection.setKeyspace(manager.poolsState.keyspace); - } else { - newConnection = sharedState.addConnectionToClose(shardId, newConnection); - } - } while (newConnection == null); - } finally { - sharedState.unregisterTask(); + InetSocketAddress serverAddress = host.getEndPoint().resolve(); + int serverPort, effectiveShardId = shardId; + if (canUseAdvancedShardAwareness(false)) { + ShardingInfo shardingInfo = host.getShardingInfo(); + boolean isSSLUsed = + null != manager.configuration().getProtocolOptions().getSSLOptions(); + serverPort = shardingInfo.getShardAwarePort(isSSLUsed); + } else { + effectiveShardId = -1; + serverPort = serverAddress.getPort(); + } + + logger.debug( + "Creating new connection to {}:{} for shard {}", + serverAddress.getAddress().getHostAddress(), + serverPort, + shardId); + newConnection = manager.connectionFactory().open(this, effectiveShardId, serverPort); + if (newConnection.shardId() == shardId) { + 
newConnection.setKeyspace(manager.poolsState.keyspace); + } else { + newConnection = sharedState.addConnectionToClose(shardId, newConnection); + if (newConnection == null) { + open[shardId].decrementAndGet(); + return ConnectionResult.SHOULD_RETRY; + } } } } @@ -699,40 +818,40 @@ private boolean addConnectionIfUnderMaximum(int shardId, ConnectionTasksSharedSt if (isClosed() && !newConnection.isClosed()) { close(newConnection); open[shardId].decrementAndGet(); - return false; + return ConnectionResult.FAILED; } dequeue(newConnection); - return true; + return ConnectionResult.SUCCESS; } catch (InterruptedException e) { Thread.currentThread().interrupt(); // Skip the open but ignore otherwise open[shardId].decrementAndGet(); - return false; + return ConnectionResult.FAILED; } catch (ConnectionException e) { open[shardId].decrementAndGet(); logger.debug("Connection error to {} while creating additional connection", host); - return false; + return ConnectionResult.FAILED; } catch (AuthenticationException e) { // This shouldn't really happen in theory open[shardId].decrementAndGet(); logger.error( "Authentication error while creating additional connection (error is: {})", e.getMessage()); - return false; + return ConnectionResult.FAILED; } catch (UnsupportedProtocolVersionException e) { // This shouldn't happen since we shouldn't have been able to connect in the first place open[shardId].decrementAndGet(); logger.error( "UnsupportedProtocolVersionException error while creating additional connection (error is: {})", e.getMessage()); - return false; + return ConnectionResult.FAILED; } catch (ClusterNameMismatchException e) { open[shardId].decrementAndGet(); logger.error( "ClusterNameMismatchException error while creating additional connection (error is: {})", e.getMessage()); - return false; + return ConnectionResult.FAILED; } } @@ -765,7 +884,7 @@ private void maybeSpawnNewConnection(int shardId) { if (scheduledForCreation[shardId].compareAndSet(inCreation, inCreation + 
1)) break; } - manager.blockingExecutor().submit(new ConnectionTask(shardId)); + scheduleConnectionTask(new ConnectionTask(shardId)); } @Override @@ -932,7 +1051,7 @@ void ensureCoreConnections() { // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to // protect against creating connection in excess of core too quickly scheduledForCreation[shardId].incrementAndGet(); - manager.blockingExecutor().submit(new ConnectionTask(shardId)); + scheduleConnectionTask(new ConnectionTask(shardId)); } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Metadata.java b/driver-core/src/main/java/com/datastax/driver/core/Metadata.java index 0a5c82829cd..f24d5b588ae 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Metadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Metadata.java @@ -66,6 +66,8 @@ public class Metadata { // See https://github.com/apache/cassandra/blob/trunk/doc/cql3/CQL.textile#appendixA private static final IntObjectHashMap> RESERVED_KEYWORDS = indexByCaseInsensitiveHash( + "-infinity", + "-nan", "add", "allow", "alter", @@ -77,38 +79,54 @@ public class Metadata { "batch", "begin", "by", + "cast", "columnfamily", "create", + "default", "delete", "desc", + "describe", "drop", "each_quorum", + "entries", + "execute", "from", + "full", "grant", + "if", "in", "index", "inet", "infinity", "insert", "into", + "is", "keyspace", "keyspaces", "limit", "local_one", "local_quorum", + "materialized", "modify", "nan", "norecursive", + "not", + "null", "of", "on", "one", + "or", "order", "password", "primary", "quorum", "rename", + "replace", "revoke", "schema", + "scylla_clustering_bound", + "scylla_counter_shard_list", + "scylla_timeuuid_list_index", "select", "set", "table", @@ -118,9 +136,11 @@ public class Metadata { "truncate", "two", "unlogged", + "unset", "update", "use", "using", + "view", "where", "with"); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java 
b/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java index e72a11e7cdd..eb841598e3d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolEvent.java @@ -155,6 +155,7 @@ static SchemaChange deserializeEvent(ByteBuf bb, ProtocolVersion version) { case V3: case V4: case V5: + case V6: change = CBUtil.readEnumValue(Change.class, bb); targetType = CBUtil.readEnumValue(SchemaElement.class, bb); targetKeyspace = CBUtil.readString(bb); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolFeature.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolFeature.java index 57f9d6f79b9..fdbdbbe6ef6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolFeature.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolFeature.java @@ -42,7 +42,7 @@ enum ProtocolFeature { boolean isSupportedBy(ProtocolVersion version) { switch (this) { case PREPARED_METADATA_CHANGES: - return version == ProtocolVersion.V5; + return version.compareTo(ProtocolVersion.V5) >= 0; case CUSTOM_PAYLOADS: return version.compareTo(ProtocolVersion.V4) >= 0; case CLIENT_TIMESTAMPS: diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java index 4d4d215a049..f78ea495595 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java @@ -71,6 +71,9 @@ public String toString() { /** The default value for {@link #getMaxSchemaAgreementWaitSeconds()}: 10. 
*/ public static final int DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS = 10; + public static final int DEFAULT_LOCAL_PORT_LOW = 10000; + public static final int DEFAULT_LOCAL_PORT_HIGH = 60000; + private volatile Cluster.Manager manager; private final int port; @@ -85,6 +88,10 @@ public String toString() { private volatile Compression compression = Compression.NONE; + private final boolean useAdvancedShardAwarness; + private final int localPortLow; + private final int localPortHigh; + /** * Creates a new {@code ProtocolOptions} instance using the {@code DEFAULT_PORT} (and without * SSL). @@ -144,12 +151,53 @@ public ProtocolOptions( SSLOptions sslOptions, AuthProvider authProvider, boolean noCompact) { + this( + port, + protocolVersion, + maxSchemaAgreementWaitSeconds, + sslOptions, + authProvider, + noCompact, + true, + DEFAULT_LOCAL_PORT_LOW, + DEFAULT_LOCAL_PORT_HIGH); + } + + /** + * Creates a new {@code ProtocolOptions} instance using the provided port and SSL context. + * + * @param port the port to use for the binary protocol. + * @param protocolVersion the protocol version to use. This can be {@code null}, in which case the + * version used will be the biggest version supported by the first node the driver + * connects to. See {@link Cluster.Builder#withProtocolVersion} for more details. + * @param sslOptions the SSL options to use. Use {@code null} if SSL is not to be used. + * @param authProvider the {@code AuthProvider} to use for authentication against the Cassandra + * nodes. + * @param noCompact whether or not to include the NO_COMPACT startup option. + * @param useAdvancedShardAwarness should the advanced shard awareness (choosing shard to connect + * to by client-side port) be used. + * @param portLow lower bound (inclusive) of ports to use for advanced shard awareness. + * @param portHigh upper bound (inclusive) of ports to use for advanced shard awareness. 
+ */ + public ProtocolOptions( + int port, + ProtocolVersion protocolVersion, + int maxSchemaAgreementWaitSeconds, + SSLOptions sslOptions, + AuthProvider authProvider, + boolean noCompact, + boolean useAdvancedShardAwarness, + int portLow, + int portHigh) { this.port = port; this.initialProtocolVersion = protocolVersion; this.maxSchemaAgreementWaitSeconds = maxSchemaAgreementWaitSeconds; this.sslOptions = sslOptions; this.authProvider = authProvider; this.noCompact = noCompact; + this.useAdvancedShardAwarness = useAdvancedShardAwarness; + this.localPortLow = portLow; + this.localPortHigh = portHigh; } void register(Cluster.Manager manager) { @@ -247,4 +295,17 @@ public AuthProvider getAuthProvider() { public boolean isNoCompact() { return noCompact; } + + /** @return Whether advanced or non-advanced shard awareness is used. */ + public boolean isUseAdvancedShardAwareness() { + return useAdvancedShardAwarness; + } + + public int getLowLocalPort() { + return localPortLow; + } + + public int getHighLocalPort() { + return localPortHigh; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java index 5e10af33447..c53d13a8303 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolVersion.java @@ -32,13 +32,20 @@ public enum ProtocolVersion { V2("2.0.0", 2, V1), V3("2.1.0", 3, V2), V4("2.2.0", 4, V3), - V5("3.10.0", 5, V4); + V5("4.0.0", 5, V4), + V6("4.0.0", 6, V5); /** The most recent protocol version supported by the driver. */ - public static final ProtocolVersion NEWEST_SUPPORTED = V4; + public static final ProtocolVersion NEWEST_SUPPORTED = V5; /** The most recent beta protocol version supported by the driver. 
*/ - public static final ProtocolVersion NEWEST_BETA = V5; + public static final ProtocolVersion NEWEST_BETA = V6; + + /** + * The default protocol version used by the driver to connect to the cluster if no specific + * protocol version was set. + */ + public static final ProtocolVersion DEFAULT = V4; private final VersionNumber minCassandraVersion; diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java index 556ee0a8a01..9fcd6a437d4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java @@ -70,6 +70,8 @@ public class QueryOptions { private volatile Cluster.Manager manager; private volatile boolean prepareOnAllHosts = true; + private volatile boolean schemaQueriesPaged = true; + /** * Creates a new {@link QueryOptions} instance using the {@link #DEFAULT_CONSISTENCY_LEVEL}, * {@link #DEFAULT_SERIAL_CONSISTENCY_LEVEL} and {@link #DEFAULT_FETCH_SIZE}. @@ -321,6 +323,26 @@ public boolean isMetadataEnabled() { return metadataEnabled; } + /** + * Toggle schema queries paging. + * + * @param enabled whether paging is enabled in schema queries. + * @return this {@code QueryOptions} instance. + */ + public QueryOptions setSchemaQueriesPaged(boolean enabled) { + this.schemaQueriesPaged = enabled; + return this; + } + + /** + * Whether schema queries are using paging. + * + * @return the value. + */ + public boolean isSchemaQueriesPaged() { + return schemaQueriesPaged; + } + /** * Sets the default window size in milliseconds used to debounce node list refresh requests. 
* @@ -486,18 +508,19 @@ public boolean equals(Object that) { QueryOptions other = (QueryOptions) that; return (this.consistency.equals(other.consistency) - && this.serialConsistency.equals(other.serialConsistency) - && this.fetchSize == other.fetchSize - && this.defaultIdempotence == other.defaultIdempotence - && this.metadataEnabled == other.metadataEnabled - && this.maxPendingRefreshNodeListRequests == other.maxPendingRefreshNodeListRequests - && this.maxPendingRefreshNodeRequests == other.maxPendingRefreshNodeRequests - && this.maxPendingRefreshSchemaRequests == other.maxPendingRefreshSchemaRequests - && this.refreshNodeListIntervalMillis == other.refreshNodeListIntervalMillis - && this.refreshNodeIntervalMillis == other.refreshNodeIntervalMillis - && this.refreshSchemaIntervalMillis == other.refreshSchemaIntervalMillis - && this.reprepareOnUp == other.reprepareOnUp - && this.prepareOnAllHosts == other.prepareOnAllHosts); + && this.serialConsistency.equals(other.serialConsistency) + && this.fetchSize == other.fetchSize + && this.defaultIdempotence == other.defaultIdempotence + && this.metadataEnabled == other.metadataEnabled + && this.maxPendingRefreshNodeListRequests == other.maxPendingRefreshNodeListRequests + && this.maxPendingRefreshNodeRequests == other.maxPendingRefreshNodeRequests + && this.maxPendingRefreshSchemaRequests == other.maxPendingRefreshSchemaRequests + && this.refreshNodeListIntervalMillis == other.refreshNodeListIntervalMillis + && this.refreshNodeIntervalMillis == other.refreshNodeIntervalMillis + && this.refreshSchemaIntervalMillis == other.refreshSchemaIntervalMillis + && this.reprepareOnUp == other.reprepareOnUp + && this.prepareOnAllHosts == other.prepareOnAllHosts) + && this.schemaQueriesPaged == other.schemaQueriesPaged; } @Override @@ -515,7 +538,8 @@ public int hashCode() { refreshNodeIntervalMillis, refreshSchemaIntervalMillis, reprepareOnUp, - prepareOnAllHosts); + prepareOnAllHosts, + schemaQueriesPaged); } public boolean 
isConsistencySet() { diff --git a/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java b/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java index 27f08226e60..3a9e8fe90d7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RegularStatement.java @@ -194,6 +194,7 @@ public int requestSizeInBytes(ProtocolVersion protocolVersion, CodecRegistry cod case V3: case V4: case V5: + case V6: size += CBUtil.sizeOfConsistencyLevel(getConsistencyLevel()); size += QueryFlag.serializedSize(protocolVersion); if (hasValues()) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Requests.java b/driver-core/src/main/java/com/datastax/driver/core/Requests.java index 88face241ab..2e29cde8b2d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Requests.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Requests.java @@ -402,6 +402,7 @@ void encode(ByteBuf dest, ProtocolVersion version) { case V3: case V4: case V5: + case V6: CBUtil.writeConsistencyLevel(consistency, dest); QueryFlag.serialize(flags, dest, version); if (flags.contains(QueryFlag.VALUES)) { @@ -439,6 +440,7 @@ int encodedSize(ProtocolVersion version) { case V3: case V4: case V5: + case V6: int size = 0; size += CBUtil.sizeOfConsistencyLevel(consistency); size += QueryFlag.serializedSize(version); @@ -610,6 +612,7 @@ void encode(ByteBuf dest, ProtocolVersion version) { case V3: case V4: case V5: + case V6: CBUtil.writeConsistencyLevel(consistency, dest); QueryFlag.serialize(flags, dest, version); if (flags.contains(QueryFlag.SERIAL_CONSISTENCY)) @@ -630,6 +633,7 @@ int encodedSize(ProtocolVersion version) { case V3: case V4: case V5: + case V6: int size = 0; size += CBUtil.sizeOfConsistencyLevel(consistency); size += QueryFlag.serializedSize(version); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Responses.java 
b/driver-core/src/main/java/com/datastax/driver/core/Responses.java index ab25da21b10..72aae8e3b54 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Responses.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Responses.java @@ -30,6 +30,8 @@ import com.datastax.driver.core.exceptions.AlreadyExistsException; import com.datastax.driver.core.exceptions.AuthenticationException; import com.datastax.driver.core.exceptions.BootstrappingException; +import com.datastax.driver.core.exceptions.CASWriteUnknownException; +import com.datastax.driver.core.exceptions.CDCWriteException; import com.datastax.driver.core.exceptions.DriverException; import com.datastax.driver.core.exceptions.DriverInternalError; import com.datastax.driver.core.exceptions.FunctionExecutionException; @@ -125,6 +127,12 @@ public Error decode(ByteBuf body, ProtocolVersion version, CodecRegistry codecRe clt, received, blockFor, failures, failuresMap, dataPresent != 0); } break; + case CAS_WRITE_UNKNOWN: + clt = CBUtil.readConsistencyLevel(body); + received = body.readInt(); + blockFor = body.readInt(); + infos = new CASWriteUnknownException(clt, received, blockFor); + break; case UNPREPARED: infos = MD5Digest.wrap(CBUtil.readBytes(body)); break; @@ -179,6 +187,10 @@ DriverException asException(EndPoint endPoint) { return ((ReadFailureException) infos).copy(endPoint); case FUNCTION_FAILURE: return new FunctionExecutionException(endPoint, message); + case CDC_WRITE_FAILURE: + return new CDCWriteException(endPoint, message); + case CAS_WRITE_UNKNOWN: + return ((CASWriteUnknownException) infos).copy(endPoint); case SYNTAX_ERROR: return new SyntaxError(endPoint, message); case UNAUTHORIZED: @@ -616,6 +628,7 @@ private Metadata decodeResultMetadata( case V3: case V4: case V5: + case V6: return Rows.Metadata.decode(body, version, codecRegistry); default: throw version.unsupported(); @@ -690,6 +703,7 @@ public Result decode( case V3: case V4: case V5: + case V6: change = 
CBUtil.readEnumValue(Change.class, body); targetType = CBUtil.readEnumValue(SchemaElement.class, body); targetKeyspace = CBUtil.readString(body); diff --git a/driver-core/src/main/java/com/datastax/driver/core/SchemaParser.java b/driver-core/src/main/java/com/datastax/driver/core/SchemaParser.java index 74443a22741..6354595b9e5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SchemaParser.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SchemaParser.java @@ -13,6 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + +/* + * Copyright (C) 2021 ScyllaDB + * + * Modified by ScyllaDB + */ package com.datastax.driver.core; import static com.datastax.driver.core.SchemaElement.AGGREGATE; @@ -34,6 +40,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.concurrent.ExecutionException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -255,7 +262,7 @@ private Map buildKeyspaces( return keyspaces; } - private Map buildTables( + protected Map buildTables( KeyspaceMetadata keyspace, List tableRows, Map> colsDefs, @@ -322,7 +329,7 @@ private Map buildTables( return tables; } - private Map buildUserTypes( + protected Map buildUserTypes( KeyspaceMetadata keyspace, List udtRows, VersionNumber cassandraVersion, @@ -342,7 +349,7 @@ protected List maybeSortUdts(List udtRows, Cluster cluster, String key return udtRows; } - private Map buildFunctions( + protected Map buildFunctions( KeyspaceMetadata keyspace, List functionRows, VersionNumber cassandraVersion, @@ -362,7 +369,7 @@ private Map buildFunctions( return functions; } - private Map buildAggregates( + protected Map buildAggregates( KeyspaceMetadata keyspace, List aggregateRows, VersionNumber cassandraVersion, @@ -382,7 +389,7 @@ private Map buildAggregates( return aggregates; } - private Map buildViews( + protected Map buildViews( KeyspaceMetadata 
keyspace, List viewRows, Map> colsDefs, @@ -417,7 +424,7 @@ private Map buildViews( // Update oldKeyspaces with the changes contained in newKeyspaces. // This method also takes care of triggering the relevant events - private void updateKeyspaces( + protected void updateKeyspaces( Metadata metadata, Map oldKeyspaces, Map newKeyspaces, @@ -848,9 +855,368 @@ private static class V3SchemaParser extends SchemaParser { protected static final String SELECT_VIEWS = "SELECT * FROM system_schema.views"; private static final String TABLE_NAME = "table_name"; + private static final String VIEW_NAME = "view_name"; + private static final String COLUMN_NAME = "column_name"; + private static final String INDEX_NAME = "index_name"; + private static final String FUNCTION_NAME = "function_name"; + private static final String ARGUMENT_TYPES = "argument_types"; + private static final String AGGREGATE_NAME = "aggregate_name"; + private static final String TYPE_NAME = "type_name"; + private static final String LIMIT = " LIMIT 1000"; + + private List fetchUDTs( + KeyspaceMetadata keyspace, Connection connection, ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + String queryPrefix = SELECT_USERTYPES + whereClause(KEYSPACE, keyspace.getName(), null, null); + List result = new ArrayList(); + List rs = queryAsync(queryPrefix + LIMIT, connection, protocolVersion).get().all(); + while (!rs.isEmpty()) { + result.addAll(rs); + String lastSeen = "'" + result.get(result.size() - 1).getString(TYPE_NAME) + "'"; + rs = + queryAsync( + queryPrefix + " AND " + TYPE_NAME + " > " + lastSeen + LIMIT, + connection, + protocolVersion) + .get() + .all(); + } + return result; + } + + private void buildUDTs( + KeyspaceMetadata keyspace, + Cluster cluster, + Connection connection, + VersionNumber cassandraVersion, + ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + 
ExecutionException { + List raw = fetchUDTs(keyspace, connection, protocolVersion); + Map userTypes = buildUserTypes(keyspace, raw, cassandraVersion, cluster); + for (UserType userType : userTypes.values()) { + keyspace.add(userType); + } + } + + private List fetchFunctions( + KeyspaceMetadata keyspace, Connection connection, ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + String queryPrefix = SELECT_FUNCTIONS + whereClause(KEYSPACE, keyspace.getName(), null, null); + List result = new ArrayList(); + List rs = queryAsync(queryPrefix + LIMIT, connection, protocolVersion).get().all(); + while (!rs.isEmpty()) { + String lastSeenFunction = "'" + rs.get(rs.size() - 1).getString(FUNCTION_NAME) + "'"; + StringBuilder sb = new StringBuilder(); + sb.append("["); + boolean first = true; + for (String arg_type : rs.get(rs.size() - 1).getList(ARGUMENT_TYPES, String.class)) { + if (first) { + first = false; + } else { + sb.append(", "); + } + sb.append("'").append(arg_type).append("'"); + } + sb.append("]"); + String lastSeenArgs = sb.toString(); + result.addAll(rs); + rs = + queryAsync( + queryPrefix + + " AND (" + + FUNCTION_NAME + + ", " + + ARGUMENT_TYPES + + ") > (" + + lastSeenFunction + + ", " + + lastSeenArgs + + ")" + + LIMIT, + connection, + protocolVersion) + .get() + .all(); + } + return result; + } + + private void buildFunctions( + KeyspaceMetadata keyspace, + Cluster cluster, + Connection connection, + VersionNumber cassandraVersion, + ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + List raw = fetchFunctions(keyspace, connection, protocolVersion); + Map functions = + buildFunctions(keyspace, raw, cassandraVersion, cluster); + for (FunctionMetadata function : functions.values()) { + keyspace.add(function); + } + } + + private List fetchAggregates( + KeyspaceMetadata keyspace, Connection 
connection, ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + String queryPrefix = + SELECT_AGGREGATES + whereClause(KEYSPACE, keyspace.getName(), null, null); + List result = new ArrayList(); + List rs = queryAsync(queryPrefix + LIMIT, connection, protocolVersion).get().all(); + while (!rs.isEmpty()) { + String lastSeenAggregate = "'" + rs.get(rs.size() - 1).getString(AGGREGATE_NAME) + "'"; + StringBuilder sb = new StringBuilder(); + sb.append("["); + boolean first = true; + for (String arg_type : rs.get(rs.size() - 1).getList(ARGUMENT_TYPES, String.class)) { + if (first) { + first = false; + } else { + sb.append(", "); + } + sb.append("'").append(arg_type).append("'"); + } + sb.append("]"); + String lastSeenArgs = sb.toString(); + result.addAll(rs); + rs = + queryAsync( + queryPrefix + + " AND (" + + AGGREGATE_NAME + + ", " + + ARGUMENT_TYPES + + ") > (" + + lastSeenAggregate + + ", " + + lastSeenArgs + + ")" + + LIMIT, + connection, + protocolVersion) + .get() + .all(); + } + return result; + } + + private void buildAggregates( + KeyspaceMetadata keyspace, + Cluster cluster, + Connection connection, + VersionNumber cassandraVersion, + ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + List raw = fetchAggregates(keyspace, connection, protocolVersion); + Map aggregates = + buildAggregates(keyspace, raw, cassandraVersion, cluster); + for (AggregateMetadata aggregate : aggregates.values()) { + keyspace.add(aggregate); + } + } + + private Map> fetchColumns( + KeyspaceMetadata keyspace, Connection connection, ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + String queryPrefix = SELECT_COLUMNS + whereClause(KEYSPACE, keyspace.getName(), null, null); + Map> result = new HashMap>(); + List rs = queryAsync(queryPrefix + 
LIMIT, connection, protocolVersion).get().all(); + while (!rs.isEmpty()) { + String lastSeenTable = "'" + rs.get(rs.size() - 1).getString(TABLE_NAME) + "'"; + String lastSeenColumn = "'" + rs.get(rs.size() - 1).getString(COLUMN_NAME) + "'"; + for (Row row : rs) { + String cfName = row.getString(TABLE_NAME); + Map colsByCf = result.get(cfName); + if (colsByCf == null) { + colsByCf = new HashMap(); + result.put(cfName, colsByCf); + } + colsByCf.put(row.getString(ColumnMetadata.COLUMN_NAME), row); + } + rs = + queryAsync( + queryPrefix + + " AND (" + + TABLE_NAME + + ", " + + COLUMN_NAME + + ") > (" + + lastSeenTable + + ", " + + lastSeenColumn + + ")" + + LIMIT, + connection, + protocolVersion) + .get() + .all(); + } + return result; + } + + private Map> buildColumns( + KeyspaceMetadata keyspace, + Connection connection, + VersionNumber cassandraVersion, + ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + Map> raw = fetchColumns(keyspace, connection, protocolVersion); + Map> result = + new HashMap>(); + for (Entry> table : raw.entrySet()) { + Map columns = new HashMap(); + for (Entry column : table.getValue().entrySet()) { + columns.put( + column.getKey(), ColumnMetadata.Raw.fromRow(column.getValue(), cassandraVersion)); + } + result.put(table.getKey(), columns); + } + return result; + } + + private List fetchTables( + KeyspaceMetadata keyspace, Connection connection, ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + String queryPrefix = SELECT_TABLES + whereClause(KEYSPACE, keyspace.getName(), null, null); + List result = new ArrayList(); + List rs = queryAsync(queryPrefix + LIMIT, connection, protocolVersion).get().all(); + while (!rs.isEmpty()) { + result.addAll(rs); + String lastSeen = "'" + result.get(result.size() - 1).getString(TABLE_NAME) + "'"; + rs = + queryAsync( + queryPrefix + " AND " + 
TABLE_NAME + " > " + lastSeen + LIMIT, + connection, + protocolVersion) + .get() + .all(); + } + return result; + } + + private Map> fetchIndexes( + KeyspaceMetadata keyspace, Connection connection, ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + String queryPrefix = SELECT_INDEXES + whereClause(KEYSPACE, keyspace.getName(), null, null); + Map> result = Maps.newHashMap(); + List rs = queryAsync(queryPrefix + LIMIT, connection, protocolVersion).get().all(); + while (!rs.isEmpty()) { + String lastSeenTable = "'" + rs.get(rs.size() - 1).getString(TABLE_NAME) + "'"; + String lastSeenIndex = "'" + rs.get(rs.size() - 1).getString(INDEX_NAME) + "'"; + for (Row row : rs) { + String cfName = row.getString(TABLE_NAME); + List rowsByCf = result.get(cfName); + if (rowsByCf == null) { + rowsByCf = Lists.newArrayList(); + result.put(cfName, rowsByCf); + } + rowsByCf.add(row); + } + rs = + queryAsync( + queryPrefix + + " AND (" + + TABLE_NAME + + ", " + + INDEX_NAME + + ") > (" + + lastSeenTable + + ", " + + lastSeenIndex + + ")" + + LIMIT, + connection, + protocolVersion) + .get() + .all(); + } + return result; + } + + private List fetchViews( + KeyspaceMetadata keyspace, Connection connection, ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + String queryPrefix = SELECT_VIEWS + whereClause(KEYSPACE, keyspace.getName(), null, null); + List result = new ArrayList(); + List rs = queryAsync(queryPrefix + LIMIT, connection, protocolVersion).get().all(); + while (!rs.isEmpty()) { + result.addAll(rs); + String lastSeen = "'" + result.get(result.size() - 1).getString(VIEW_NAME) + "'"; + rs = + queryAsync( + queryPrefix + " AND " + VIEW_NAME + " > " + lastSeen + LIMIT, + connection, + protocolVersion) + .get() + .all(); + } + return result; + } + + private void buildTablesIndexesAndViews( + KeyspaceMetadata keyspace, 
+ Cluster cluster, + Connection connection, + VersionNumber cassandraVersion, + ProtocolVersion protocolVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + List cf = fetchTables(keyspace, connection, protocolVersion); + Map> columns = + buildColumns(keyspace, connection, cassandraVersion, protocolVersion); + Map> indexes = fetchIndexes(keyspace, connection, protocolVersion); + Map tables = + buildTables(keyspace, cf, columns, indexes, cassandraVersion, cluster); + for (TableMetadata table : tables.values()) { + keyspace.add(table); + } + List viewsData = fetchViews(keyspace, connection, protocolVersion); + Map views = + buildViews(keyspace, viewsData, columns, cassandraVersion, cluster); + for (MaterializedViewMetadata view : views.values()) { + keyspace.add(view); + } + } + + private Map buildSchema( + Cluster cluster, Connection connection, VersionNumber cassandraVersion) + throws ConnectionException, BusyConnectionException, InterruptedException, + ExecutionException { + ProtocolVersion protocolVersion = + cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + + Map keyspaces = new LinkedHashMap(); + ResultSet keyspacesData = queryAsync(SELECT_KEYSPACES, connection, protocolVersion).get(); + for (Row keyspaceRow : keyspacesData) { + KeyspaceMetadata keyspace = KeyspaceMetadata.build(keyspaceRow, cassandraVersion); + keyspaces.put(keyspace.getName(), keyspace); + } + + for (Entry keyspace : keyspaces.entrySet()) { + buildUDTs(keyspace.getValue(), cluster, connection, cassandraVersion, protocolVersion); + buildFunctions(keyspace.getValue(), cluster, connection, cassandraVersion, protocolVersion); + buildAggregates( + keyspace.getValue(), cluster, connection, cassandraVersion, protocolVersion); + buildTablesIndexesAndViews( + keyspace.getValue(), cluster, connection, cassandraVersion, protocolVersion); + } + + return keyspaces; + } @Override - SystemRows fetchSystemRows( + void refresh( 
Cluster cluster, SchemaElement targetType, String targetKeyspace, @@ -860,7 +1226,59 @@ SystemRows fetchSystemRows( VersionNumber cassandraVersion) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { + if (targetType == null + && cluster.getConfiguration().getQueryOptions().isSchemaQueriesPaged()) { + Map keyspaces = + buildSchema(cluster, connection, cassandraVersion); + Metadata metadata; + try { + metadata = cluster.getMetadata(); + } catch (IllegalStateException e) { + logger.warn("Unable to refresh metadata, cluster has been closed"); + return; + } + metadata.lock.lock(); + try { + updateKeyspaces(metadata, metadata.keyspaces, keyspaces, null); + // If we rebuild all from scratch or have an updated keyspace, rebuild the token map + // since some replication on some keyspace may have changed + metadata.rebuildTokenMap(); + } catch (RuntimeException e) { + // Failure to parse the schema is definitively wrong so log a full-on error, but this + // won't + // generally prevent queries to + // work and this can happen when new Cassandra versions modify stuff in the schema and the + // driver hasn't yet be modified. + // So log, but let things go otherwise. 
+ logger.error( + "Error parsing schema from Cassandra system tables: the schema in Cluster#getMetadata() will appear incomplete or stale", + e); + } finally { + metadata.lock.unlock(); + } + } else { + super.refresh( + cluster, + targetType, + targetKeyspace, + targetName, + targetSignature, + connection, + cassandraVersion); + } + } + @Override + SystemRows fetchSystemRows( + Cluster cluster, + SchemaElement targetType, + String targetKeyspace, + String targetName, + List targetSignature, + Connection connection, + VersionNumber cassandraVersion) + throws ConnectionException, BusyConnectionException, ExecutionException, + InterruptedException { boolean isSchemaOrKeyspace = (targetType == null || targetType == KEYSPACE); ResultSetFuture ksFuture = null, @@ -957,7 +1375,7 @@ String tableNameColumn() { return TABLE_NAME; } - protected String whereClause( + protected static String whereClause( SchemaElement targetType, String targetKeyspace, String targetName, diff --git a/driver-core/src/main/java/com/datastax/driver/core/ShardingInfo.java b/driver-core/src/main/java/com/datastax/driver/core/ShardingInfo.java index 192070b4aa7..319a9ec6589 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ShardingInfo.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ShardingInfo.java @@ -25,18 +25,29 @@ public class ShardingInfo { private static final String SCYLLA_PARTITIONER = "SCYLLA_PARTITIONER"; private static final String SCYLLA_SHARDING_ALGORITHM = "SCYLLA_SHARDING_ALGORITHM"; private static final String SCYLLA_SHARDING_IGNORE_MSB = "SCYLLA_SHARDING_IGNORE_MSB"; + private static final String SCYLLA_SHARD_AWARE_PORT = "SCYLLA_SHARD_AWARE_PORT"; + private static final String SCYLLA_SHARD_AWARE_PORT_SSL = "SCYLLA_SHARD_AWARE_PORT_SSL"; private final int shardsCount; private final String partitioner; private final String shardingAlgorithm; private final int shardingIgnoreMSB; + private final int shardAwarePort; + private final int shardAwarePortSSL; 
private ShardingInfo( - int shardsCount, String partitioner, String shardingAlgorithm, int shardingIgnoreMSB) { + int shardsCount, + String partitioner, + String shardingAlgorithm, + int shardingIgnoreMSB, + int shardAwarePort, + int shardAwarePortSSL) { this.shardsCount = shardsCount; this.partitioner = partitioner; this.shardingAlgorithm = shardingAlgorithm; this.shardingIgnoreMSB = shardingIgnoreMSB; + this.shardAwarePort = shardAwarePort; + this.shardAwarePortSSL = shardAwarePortSSL; } public int getShardsCount() { @@ -55,6 +66,10 @@ public int shardId(Token t) { return (int) (sum >>> 32); } + public int getShardAwarePort(boolean isSSLUsed) { + return isSSLUsed ? shardAwarePortSSL : shardAwarePort; + } + public static class ConnectionShardingInfo { public final int shardId; public final ShardingInfo shardingInfo; @@ -71,6 +86,8 @@ public static ConnectionShardingInfo parseShardingInfo(Map> String partitioner = parseString(params, SCYLLA_PARTITIONER); String shardingAlgorithm = parseString(params, SCYLLA_SHARDING_ALGORITHM); Integer shardingIgnoreMSB = parseInt(params, SCYLLA_SHARDING_IGNORE_MSB); + Integer shardAwarePort = parseInt(params, SCYLLA_SHARD_AWARE_PORT); + Integer shardAwarePortSSL = parseInt(params, SCYLLA_SHARD_AWARE_PORT_SSL); if (shardId == null || shardsCount == null || partitioner == null @@ -80,8 +97,23 @@ public static ConnectionShardingInfo parseShardingInfo(Map> || !shardingAlgorithm.equals("biased-token-round-robin")) { return null; } + if (shardAwarePort == null) { + shardAwarePort = 0; + } + + if (shardAwarePortSSL == null) { + shardAwarePortSSL = 0; + } + return new ConnectionShardingInfo( - shardId, new ShardingInfo(shardsCount, partitioner, shardingAlgorithm, shardingIgnoreMSB)); + shardId, + new ShardingInfo( + shardsCount, + partitioner, + shardingAlgorithm, + shardingIgnoreMSB, + shardAwarePort, + shardAwarePortSSL)); } private static String parseString(Map> params, String key) { diff --git 
a/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java index b7f613d569f..182c4ac3940 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java @@ -47,6 +47,7 @@ private static int streamIdSizeFor(ProtocolVersion version) { case V3: case V4: case V5: + case V6: return 2; default: throw version.unsupported(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java b/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java index 58e9ef47a8c..60055e2d09c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java +++ b/driver-core/src/main/java/com/datastax/driver/core/VersionNumber.java @@ -34,7 +34,7 @@ public class VersionNumber implements Comparable { private static final String VERSION_REGEXP = - "(\\d+)\\.(\\d+)(\\.\\d+)?(\\.\\d+)?([~\\-]\\w[.\\w]*(?:\\-\\w[.\\w]*)*)?(\\+[.\\w]+)?"; + "(\\d+)\\.(\\d+)(\\.(?:rc)?\\d+)?(\\.\\d+)?([~\\-]\\w[.\\w]*(?:\\-\\w[.\\w]*)*)?(\\+[.\\w]+)?"; private static final Pattern pattern = Pattern.compile(VERSION_REGEXP); private final int major; @@ -79,8 +79,9 @@ public static VersionNumber parse(String version) { int minor = Integer.parseInt(matcher.group(2)); String pa = matcher.group(3); + boolean isRC = pa != null && pa.startsWith(".rc"); // Detect Scylla naming convention: X.Y.rcZ int patch = - pa == null || pa.isEmpty() + pa == null || pa.isEmpty() || isRC ? 0 : Integer.parseInt( pa.substring(1)); // dropping the initial '.' since it's included this time @@ -94,10 +95,12 @@ public static VersionNumber parse(String version) { String pr = matcher.group(5); String[] preReleases = - pr == null || pr.isEmpty() - ? null - : pr.substring(1) - .split("\\-"); // drop initial '-' or '~' then split on the remaining ones + isRC + ? 
new String[] {pa.substring(1)} + : pr == null || pr.isEmpty() + ? null + : pr.substring(1) + .split("\\-"); // drop initial '-' or '~' then split on the remaining ones String bl = matcher.group(6); String build = bl == null || bl.isEmpty() ? null : bl.substring(1); // drop the initial '+' diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/CASWriteUnknownException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CASWriteUnknownException.java new file mode 100644 index 00000000000..751a403991d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CASWriteUnknownException.java @@ -0,0 +1,91 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.EndPoint; + +public class CASWriteUnknownException extends QueryConsistencyException { + + private static final long serialVersionUID = 0; + + /** + * This constructor should only be used internally by the driver when decoding error responses. 
+ */ + public CASWriteUnknownException(ConsistencyLevel consistency, int received, int required) { + this(null, consistency, received, required); + } + + public CASWriteUnknownException( + EndPoint endPoint, ConsistencyLevel consistency, int received, int required) { + super( + endPoint, + String.format( + "CAS operation result is unknown - proposal was not accepted by a quorum. (%d / %d)", + received, required), + consistency, + received, + required); + } + + private CASWriteUnknownException( + EndPoint endPoint, + String msg, + Throwable cause, + ConsistencyLevel consistency, + int received, + int required) { + super(endPoint, msg, cause, consistency, received, required); + } + + @Override + public CASWriteUnknownException copy() { + return new CASWriteUnknownException( + getEndPoint(), + getMessage(), + this, + getConsistencyLevel(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements()); + } + + /** + * Create a copy of this exception with a nicer stack trace, and including the coordinator address + * that caused this exception to be raised. + * + *

This method is mainly intended for internal use by the driver and exists mainly because: + * + *

    + *
  1. the original exception was decoded from a response frame and at that time, the + * coordinator address was not available; and + *
  2. the newly-created exception will refer to the current thread in its stack trace, which + * generally yields a more user-friendly stack trace that the original one. + *
+ * + * @param endPoint The full address of the host that caused this exception to be thrown. + * @return a copy/clone of this exception, but with the given host address instead of the original + * one. + */ + public CASWriteUnknownException copy(EndPoint endPoint) { + return new CASWriteUnknownException( + endPoint, + getMessage(), + this, + getConsistencyLevel(), + getReceivedAcknowledgements(), + getRequiredAcknowledgements()); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/CDCWriteException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CDCWriteException.java new file mode 100644 index 00000000000..f4ac21c1663 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/CDCWriteException.java @@ -0,0 +1,61 @@ +/* + * Copyright DataStax, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.EndPoint; +import java.net.InetAddress; +import java.net.InetSocketAddress; + +/** An error occurred when trying to write a CDC mutation to the commitlog * */ +public class CDCWriteException extends QueryExecutionException implements CoordinatorException { + + private static final long serialVersionUID = 0; + + private final EndPoint endPoint; + + public CDCWriteException(EndPoint endPoint, String message) { + super(message); + this.endPoint = endPoint; + } + + /** Private constructor used solely when copying exceptions. */ + private CDCWriteException(EndPoint endPoint, String message, CDCWriteException cause) { + super(message, cause); + this.endPoint = endPoint; + } + + @Override + public EndPoint getEndPoint() { + return endPoint; + } + + @Override + @Deprecated + public InetSocketAddress getAddress() { + return (endPoint == null) ? null : endPoint.resolve(); + } + + @Override + @Deprecated + public InetAddress getHost() { + return (endPoint == null) ? null : endPoint.resolve().getAddress(); + } + + @Override + public CDCWriteException copy() { + return new CDCWriteException(endPoint, getMessage(), this); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java index b2bf6f3f416..71a266fdd19 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java @@ -42,7 +42,8 @@ public ReadTimeoutException( super( endPoint, String.format( - "Cassandra timeout during read query at consistency %s (%s)", + "Cassandra timeout during read query at consistency %s (%s). 
" + + "In case this was generated during read repair, the consistency level is not representative of the actual consistency.", consistency, formatDetails(received, required, dataPresent)), consistency, received, diff --git a/driver-core/src/test/java/com/datastax/driver/core/AsyncQueryTest.java b/driver-core/src/test/java/com/datastax/driver/core/AsyncQueryTest.java index 3a9065c042f..098164caa88 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AsyncQueryTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AsyncQueryTest.java @@ -30,7 +30,6 @@ import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.base.Function; import com.google.common.base.Throwables; -import com.google.common.collect.Lists; import com.google.common.util.concurrent.AsyncFunction; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.Uninterruptibles; @@ -92,13 +91,7 @@ public void cancelled_query_should_release_the_connection() throws InterruptedEx public void should_init_cluster_and_session_if_needed() throws Exception { // For this test we need an uninitialized cluster, so we can't reuse the one provided by the // parent class. Rebuild a new one with the same (unique) host. 
- Host host = cluster().getMetadata().allHosts().iterator().next(); - - Cluster cluster2 = - register( - Cluster.builder() - .addContactPointsWithPorts(Lists.newArrayList(host.getEndPoint().resolve())) - .build()); + Cluster cluster2 = register(createClusterBuilder().build()); try { Session session2 = cluster2.newSession(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/AuthenticationTest.java b/driver-core/src/test/java/com/datastax/driver/core/AuthenticationTest.java index 03d7e7c6752..94a64adc2a7 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/AuthenticationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/AuthenticationTest.java @@ -61,12 +61,7 @@ public void sleepIf12() { @Test(groups = "short") public void should_connect_with_credentials() { PlainTextAuthProvider authProvider = spy(new PlainTextAuthProvider("cassandra", "cassandra")); - Cluster cluster = - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withAuthProvider(authProvider) - .build(); + Cluster cluster = createClusterBuilder().withAuthProvider(authProvider).build(); cluster.connect(); verify(authProvider, atLeastOnce()) .newAuthenticator( @@ -86,13 +81,7 @@ public void should_connect_with_credentials() { */ @Test(groups = "short") public void should_fail_to_connect_with_wrong_credentials() { - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withCredentials("bogus", "bogus") - .build()); + Cluster cluster = register(createClusterBuilder().withCredentials("bogus", "bogus").build()); try { cluster.connect(); @@ -114,12 +103,7 @@ public void should_fail_to_connect_with_wrong_credentials() { @Test(groups = "short", expectedExceptions = AuthenticationException.class) public void should_fail_to_connect_without_credentials() { - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - 
.withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster = register(createClusterBuilder().build()); cluster.connect(); } @@ -132,9 +116,7 @@ public void should_fail_to_connect_without_credentials() { @CCMConfig(dirtiesContext = true) public void should_connect_with_slow_server() { Cluster cluster = - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withAuthProvider(new SlowAuthProvider()) .withPoolingOptions(new PoolingOptions().setHeartbeatIntervalSeconds(1)) .build(); @@ -177,13 +159,7 @@ public void run() { @Test(groups = "short") public void should_not_create_pool_with_wrong_credentials() { PlainTextAuthProvider authProvider = new PlainTextAuthProvider("cassandra", "cassandra"); - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withAuthProvider(authProvider) - .build()); + Cluster cluster = register(createClusterBuilder().withAuthProvider(authProvider).build()); cluster.init(); authProvider.setPassword("wrong"); Level previous = TestUtils.setLogLevel(Session.class, Level.WARN); diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMAccess.java b/driver-core/src/test/java/com/datastax/driver/core/CCMAccess.java index c3bcc00e8d5..b722025a266 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMAccess.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMAccess.java @@ -17,7 +17,9 @@ import java.io.Closeable; import java.io.File; +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.util.List; import java.util.Map; public interface CCMAccess extends Closeable { @@ -41,8 +43,6 @@ enum Workload { * is assumed that this value is only used for representing the compatible Cassandra version for * that DSE version. * - *

- * * @return The version of this CCM cluster. */ VersionNumber getCassandraVersion(); @@ -50,8 +50,6 @@ enum Workload { /** * Returns the DSE version of this CCM cluster if this is a DSE cluster, otherwise null. * - *

- * * @return The version of this CCM cluster. */ VersionNumber getDSEVersion(); @@ -87,13 +85,29 @@ enum Workload { void setKeepLogs(boolean keepLogs); /** - * @return the node count for each datacenter, mapped in the corresponding cell of the returned - * int array. This is the count that was passed at initialization (that is, the argument to - * {@link CCMBridge.Builder#withNodes(int...)} or {@link CCMConfig#numberOfNodes()}). Note - * that it will NOT be updated dynamically if nodes are added or removed at runtime. + * Returns the node count for each datacenter, mapped in the corresponding cell of the returned + * int array. + * + *

This is the count that was passed at initialization (that is, the argument to {@link + * CCMBridge.Builder#withNodes(int...)} or {@link CCMConfig#numberOfNodes()}). Note that it will + * NOT be updated dynamically if nodes are added or removed at runtime. + * + * @return the node count for each datacenter. */ int[] getNodeCount(); + /** + * Returns the contact points to use to contact the CCM cluster. + * + *

This reflects the initial number of nodes in the cluster, as configured at initialization + * (that is, the argument to {@link CCMBridge.Builder#withNodes(int...)} or {@link + * CCMConfig#numberOfNodes()}). Note that it will NOT be updated dynamically if nodes are + * added or removed at runtime. + * + * @return the contact points to use to contact the CCM cluster. + */ + List getContactPoints(); + /** * Returns the address of the {@code nth} host in the CCM cluster (counting from 1, i.e., {@code * addressOfNode(1)} returns the address of the first node. diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java index b80f6dcb9b1..6d085b36bea 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -41,11 +41,15 @@ import java.io.OutputStream; import java.io.PrintWriter; import java.io.StringWriter; +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -73,13 +77,13 @@ public class CCMBridge implements CCMAccess { private static final Set CASSANDRA_INSTALL_ARGS; - public static final String DEFAULT_CLIENT_TRUSTSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_CLIENT_TRUSTSTORE_PASSWORD = "scylla1sfun"; public static final String DEFAULT_CLIENT_TRUSTSTORE_PATH = "/client.truststore"; public static final File DEFAULT_CLIENT_TRUSTSTORE_FILE = createTempStore(DEFAULT_CLIENT_TRUSTSTORE_PATH); - public static final String DEFAULT_CLIENT_KEYSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_CLIENT_KEYSTORE_PASSWORD = "scylla1sfun"; public static final 
String DEFAULT_CLIENT_KEYSTORE_PATH = "/client.keystore"; public static final File DEFAULT_CLIENT_KEYSTORE_FILE = @@ -89,17 +93,29 @@ public class CCMBridge implements CCMAccess { public static final File DEFAULT_CLIENT_PRIVATE_KEY_FILE = createTempStore("/client.key"); public static final File DEFAULT_CLIENT_CERT_CHAIN_FILE = createTempStore("/client.crt"); - public static final String DEFAULT_SERVER_TRUSTSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_SERVER_TRUSTSTORE_PASSWORD = "scylla1sfun"; public static final String DEFAULT_SERVER_TRUSTSTORE_PATH = "/server.truststore"; + public static final String DEFAULT_SERVER_TRUSTSTORE_PEM_PATH = "/server.truststore.pem"; + private static final File DEFAULT_SERVER_TRUSTSTORE_FILE = createTempStore(DEFAULT_SERVER_TRUSTSTORE_PATH); + private static final File DEFAULT_SERVER_TRUSTSTORE_PEM_FILE = + createTempStore(DEFAULT_SERVER_TRUSTSTORE_PEM_PATH); - public static final String DEFAULT_SERVER_KEYSTORE_PASSWORD = "cassandra1sfun"; + public static final String DEFAULT_SERVER_KEYSTORE_PASSWORD = "scylla1sfun"; public static final String DEFAULT_SERVER_KEYSTORE_PATH = "/server.keystore"; + // Contain the same keypair as the server keystore, but in format usable by Scylla + public static final String DEFAULT_SERVER_PRIVATE_KEY_PATH = "/server.key"; + public static final String DEFAULT_SERVER_CERT_CHAIN_PATH = "/server.crt"; + private static final File DEFAULT_SERVER_KEYSTORE_FILE = createTempStore(DEFAULT_SERVER_KEYSTORE_PATH); + private static final File DEFAULT_SERVER_PRIVATE_KEY_FILE = + createTempStore(DEFAULT_SERVER_PRIVATE_KEY_PATH); + private static final File DEFAULT_SERVER_CERT_CHAIN_FILE = + createTempStore(DEFAULT_SERVER_CERT_CHAIN_PATH); /** * The environment variables to use when invoking CCM. 
Inherits the current processes environment, @@ -181,7 +197,8 @@ public class CCMBridge implements CCMAccess { String installDirectory = System.getProperty("cassandra.directory"); String branch = System.getProperty("cassandra.branch"); - + // Inherit the current environment. + Map envMap = Maps.newHashMap(new ProcessBuilder().environment()); ImmutableSet.Builder installArgs = ImmutableSet.builder(); if (installDirectory != null && !installDirectory.trim().isEmpty()) { installArgs.add("--install-dir=" + new File(installDirectory).getAbsolutePath()); @@ -190,6 +207,12 @@ public class CCMBridge implements CCMAccess { } else if (inputScyllaVersion != null && !inputScyllaVersion.trim().isEmpty()) { installArgs.add(" --scylla "); installArgs.add("-v release:" + inputScyllaVersion); + + // Detect Scylla Enterprise - it should start with + // a 4-digit year. + if (inputScyllaVersion.matches("\\d{4}\\..*")) { + envMap.put("SCYLLA_PRODUCT", "enterprise"); + } } else if (inputCassandraVersion != null && !inputCassandraVersion.trim().isEmpty()) { installArgs.add("-v " + inputCassandraVersion); } @@ -200,8 +223,6 @@ public class CCMBridge implements CCMAccess { CASSANDRA_INSTALL_ARGS = installArgs.build(); - // Inherit the current environment. - Map envMap = Maps.newHashMap(new ProcessBuilder().environment()); // If ccm.path is set, override the PATH variable with it. 
String ccmPath = System.getProperty("ccm.path"); if (ccmPath != null) { @@ -387,6 +408,24 @@ public int[] getNodeCount() { return Arrays.copyOf(nodes, nodes.length); } + @Override + public List getContactPoints() { + List contactPoints = new ArrayList(); + int n = 1; + for (int dc = 1; dc <= nodes.length; dc++) { + int nodesInDc = nodes[dc - 1]; + for (int i = 0; i < nodesInDc; i++) { + try { + contactPoints.add(InetAddress.getByName(ipOfNode(n))); + } catch (UnknownHostException e) { + Throwables.propagate(e); + } + n++; + } + } + return contactPoints; + } + protected String ipOfNode(int n) { return ipPrefix + n; } @@ -398,7 +437,11 @@ public InetSocketAddress addressOfNode(int n) { @Override public InetSocketAddress jmxAddressOfNode(int n) { - return new InetSocketAddress("localhost", jmxPorts[n - 1]); + if (GLOBAL_SCYLLA_VERSION_NUMBER != null) { + return new InetSocketAddress(ipOfNode(n), jmxPorts[n - 1]); + } else { + return new InetSocketAddress("localhost", jmxPorts[n - 1]); + } } @Override @@ -819,8 +862,10 @@ public ProtocolVersion getProtocolVersion() { return ProtocolVersion.V2; } else if (version.compareTo(VersionNumber.parse("2.2")) < 0) { return ProtocolVersion.V3; - } else { + } else if (version.compareTo(VersionNumber.parse("4.0")) < 0) { return ProtocolVersion.V4; + } else { + return ProtocolVersion.V5; } } @@ -922,10 +967,19 @@ public Builder withoutNodes() { /** Enables SSL encryption. 
*/ public Builder withSSL() { cassandraConfiguration.put("client_encryption_options.enabled", "true"); - cassandraConfiguration.put( - "client_encryption_options.keystore", DEFAULT_SERVER_KEYSTORE_FILE.getAbsolutePath()); - cassandraConfiguration.put( - "client_encryption_options.keystore_password", DEFAULT_SERVER_KEYSTORE_PASSWORD); + if (GLOBAL_SCYLLA_VERSION_NUMBER != null) { + cassandraConfiguration.put( + "client_encryption_options.certificate", + DEFAULT_SERVER_CERT_CHAIN_FILE.getAbsolutePath()); + cassandraConfiguration.put( + "client_encryption_options.keyfile", DEFAULT_SERVER_PRIVATE_KEY_FILE.getAbsolutePath()); + } else { + cassandraConfiguration.put("client_encryption_options.optional", "false"); + cassandraConfiguration.put( + "client_encryption_options.keystore", DEFAULT_SERVER_KEYSTORE_FILE.getAbsolutePath()); + cassandraConfiguration.put( + "client_encryption_options.keystore_password", DEFAULT_SERVER_KEYSTORE_PASSWORD); + } return this; } @@ -933,10 +987,17 @@ public Builder withSSL() { public Builder withAuth() { withSSL(); cassandraConfiguration.put("client_encryption_options.require_client_auth", "true"); - cassandraConfiguration.put( - "client_encryption_options.truststore", DEFAULT_SERVER_TRUSTSTORE_FILE.getAbsolutePath()); - cassandraConfiguration.put( - "client_encryption_options.truststore_password", DEFAULT_SERVER_TRUSTSTORE_PASSWORD); + if (GLOBAL_SCYLLA_VERSION_NUMBER != null) { + cassandraConfiguration.put( + "client_encryption_options.truststore", + DEFAULT_SERVER_TRUSTSTORE_PEM_FILE.getAbsolutePath()); + } else { + cassandraConfiguration.put( + "client_encryption_options.truststore", + DEFAULT_SERVER_TRUSTSTORE_FILE.getAbsolutePath()); + cassandraConfiguration.put( + "client_encryption_options.truststore_password", DEFAULT_SERVER_TRUSTSTORE_PASSWORD); + } return this; } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMCache.java b/driver-core/src/test/java/com/datastax/driver/core/CCMCache.java index 
e8682003c7f..26bbaeed031 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMCache.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMCache.java @@ -23,8 +23,10 @@ import com.google.common.cache.RemovalNotification; import com.google.common.cache.Weigher; import java.io.File; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; @@ -107,6 +109,11 @@ public int[] getNodeCount() { return ccm.getNodeCount(); } + @Override + public List getContactPoints() { + return ccm.getContactPoints(); + } + @Override public InetSocketAddress addressOfNode(int n) { return ccm.addressOfNode(n); diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMTestsSupport.java b/driver-core/src/test/java/com/datastax/driver/core/CCMTestsSupport.java index e2e5c3729d7..c92f9252e75 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMTestsSupport.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMTestsSupport.java @@ -29,6 +29,7 @@ import static org.assertj.core.api.Assertions.fail; import com.datastax.driver.core.CCMAccess.Workload; +import com.datastax.driver.core.Cluster.Builder; import com.datastax.driver.core.CreateCCM.TestMode; import com.datastax.driver.core.exceptions.InvalidQueryException; import com.google.common.base.Throwables; @@ -163,6 +164,11 @@ public int[] getNodeCount() { return delegate.getNodeCount(); } + @Override + public List getContactPoints() { + return delegate.getContactPoints(); + } + @Override public String checkForErrors() { return delegate.checkForErrors(); @@ -637,7 +643,7 @@ public void beforeTestClass(Object testInstance) throws Exception { } catch (Exception e) { LOGGER.error(e.getMessage(), e); errorOut(); - fail(e.getMessage()); + throw e; } } } @@ -712,18 +718,15 @@ public void afterTestClass() throws Exception 
{ /** * Returns the cluster builder to use for this test. * - *

The default implementation returns a vanilla builder. - * - *

It's not required to call {@link - * com.datastax.driver.core.Cluster.Builder#addContactPointsWithPorts}, it will be done - * automatically. + *

The default implementation returns a vanilla builder with contact points and port that match + * the running CCM cluster. Therefore it's not required to call {@link + * Cluster.Builder#addContactPointsWithPorts}, it will be done automatically. * * @return The cluster builder to use for the tests. */ public Cluster.Builder createClusterBuilder() { - return Cluster.builder() - // use a different codec registry for each cluster instance - .withCodecRegistry(new CodecRegistry()); + Cluster.Builder builder = Cluster.builder(); + return configureClusterBuilder(builder); } /** @@ -736,7 +739,18 @@ public Cluster.Builder createClusterBuilder() { * @return The cluster builder to use for the tests. */ public Cluster.Builder createClusterBuilderNoDebouncing() { - return Cluster.builder().withQueryOptions(TestUtils.nonDebouncingQueryOptions()); + return createClusterBuilder().withQueryOptions(TestUtils.nonDebouncingQueryOptions()); + } + + /** + * Configures the builder with contact points and port that match the running CCM cluster. + * Therefore it's not required to call {@link Cluster.Builder#addContactPointsWithPorts}, it will + * be done automatically. + * + * @return The cluster builder (for method chaining). 
+ */ + protected Builder configureClusterBuilder(Builder builder) { + return TestUtils.configureClusterBuilder(builder, ccm()); } /** @@ -969,6 +983,12 @@ protected void initTestCluster(Object testInstance) throws Exception { // add contact points only if the provided builder didn't do so if (builder.getContactPoints().isEmpty()) builder.addContactPoints(getContactPoints()); builder.withPort(ccm.getBinaryPort()); + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("3.10")) >= 0 + && ccm().getCassandraVersion().compareTo(VersionNumber.parse("4.0-beta5")) < 0) { + // prevent usage of protocol v5 for 3.10 and 3.11 since these versions have the beta + // version of it + builder.withProtocolVersion(ProtocolVersion.V4); + } cluster = register(builder.build()); cluster.init(); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ClusterStressTest.java b/driver-core/src/test/java/com/datastax/driver/core/ClusterStressTest.java index e7a62993b42..f50dac1fb4d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ClusterStressTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ClusterStressTest.java @@ -174,9 +174,7 @@ private class CreateClusterAndCheckConnections CreateClusterAndCheckConnections(CountDownLatch startSignal) { this.startSignal = startSignal; this.cluster = - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withPoolingOptions( new PoolingOptions().setCoreConnectionsPerHost(HostDistance.LOCAL, 1)) .withNettyOptions(channelMonitor.nettyOptions()) diff --git a/driver-core/src/test/java/com/datastax/driver/core/ControlConnectionTest.java b/driver-core/src/test/java/com/datastax/driver/core/ControlConnectionTest.java index 06d5b711f38..225f1bd253d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ControlConnectionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ControlConnectionTest.java @@ -17,9 +17,11 @@ import 
static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; +import static com.datastax.driver.core.ScassandraCluster.SELECT_LOCAL; import static com.datastax.driver.core.ScassandraCluster.SELECT_PEERS; +import static com.datastax.driver.core.ScassandraCluster.SELECT_PEERS_DSE68; +import static com.datastax.driver.core.ScassandraCluster.SELECT_PEERS_V2; import static com.datastax.driver.core.ScassandraCluster.datacenter; -import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; import static com.datastax.driver.core.TestUtils.nonQuietClusterCloseOptions; import static com.google.common.collect.Lists.newArrayList; import static org.scassandra.http.client.PrimingRequest.then; @@ -31,6 +33,8 @@ import com.datastax.driver.core.policies.ReconnectionPolicy; import com.datastax.driver.core.utils.CassandraVersion; import com.google.common.base.Function; +import com.google.common.base.Optional; +import com.google.common.collect.Collections2; import com.google.common.collect.HashMultiset; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -38,15 +42,20 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; +import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Iterator; import java.util.Map; +import java.util.Random; import java.util.Set; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.log4j.Level; +import org.scassandra.http.client.PrimingClient; import org.scassandra.http.client.PrimingRequest; +import org.scassandra.http.client.Result; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.DataProvider; @@ -79,9 +88,7 @@ public void should_prevent_simultaneous_reconnection_attempts() throws Interrupt // this host Cluster cluster = register( 
- Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withReconnectionPolicy(reconnectionPolicy) .withLoadBalancingPolicy(loadBalancingPolicy) .build()); @@ -109,12 +116,7 @@ public void should_prevent_simultaneous_reconnection_attempts() throws Interrupt @CassandraVersion("2.1.0") public void should_parse_UDT_definitions_when_using_default_protocol_version() { // First driver instance: create UDT - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster = register(createClusterBuilder().build()); Session session = cluster.connect(); session.execute( "create keyspace ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}"); @@ -122,12 +124,7 @@ public void should_parse_UDT_definitions_when_using_default_protocol_version() { cluster.close(); // Second driver instance: read UDT definition - Cluster cluster2 = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster2 = register(createClusterBuilder().build()); UserType fooType = cluster2.getMetadata().getKeyspace("ks").getUserType("foo"); assertThat(fooType.getFieldNames()).containsExactly("i"); @@ -146,13 +143,7 @@ public void should_parse_UDT_definitions_when_using_default_protocol_version() { @CCMConfig(numberOfNodes = 3) public void should_reestablish_if_control_node_decommissioned() throws InterruptedException { InetSocketAddress firstHost = ccm().addressOfNode(1); - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(firstHost.getAddress()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()) - .build()); + Cluster cluster = register(createClusterBuilderNoDebouncing().build()); cluster.init(); // Ensure the control connection host is that of the first node. 
@@ -309,7 +300,9 @@ static void run_with_null_peer_info(String columns, boolean expectPeer2, boolean } StringBuilder columnDataBuilder = new StringBuilder(); - for (String column : columns.split(",")) { + String[] splitColumns = columns.split(","); + Set splitColumnsSet = new HashSet(Arrays.asList(splitColumns)); + for (String column : splitColumns) { builder = builder.forcePeerInfo(1, 2, column, null); columnDataBuilder.append(String.format("%s=null, ", column)); } @@ -344,11 +337,31 @@ static void run_with_null_peer_info(String columns, boolean expectPeer2, boolean cluster.init(); InetAddress node2Address = scassandraCluster.address(2).getAddress(); - String expectedError = - String.format( - "Found invalid row in system.peers: [peer=%s, %s]. " - + "This is likely a gossip or snitch issue, this host will be ignored.", - node2Address, columnData); + String expectedError; + // Based on ControlConnection::formatInvalidPeer + if (withPeersV2) { + expectedError = + String.format( + "Found invalid row in system.peers: [peer=%s, %s]. " + + "This is likely a gossip or snitch issue, this host will be ignored.", + node2Address, columnData); + } else { + expectedError = + String.format( + "Found invalid row in system.peers: [peer=%s, %s%s%s%s]. " + + "This is likely a gossip or snitch issue, this host will be ignored.", + node2Address, + !splitColumnsSet.contains("native_transport_address") + ? "missing native_transport_address, " + : "", + !splitColumnsSet.contains("native_transport_port") + ? "missing native_transport_port, " + : "", + !splitColumnsSet.contains("native_transport_port_ssl") + ? "missing native_transport_port_ssl, " + : "", + columnData); + } String log = logs.get(); // then: A peer with a null rack should not show up in host metadata, unless allowed via // system property. @@ -579,6 +592,294 @@ public void should_connect_when_peers_v2_table_not_present() { } } + /** + * Cassandra 4.0 supports native_address and native_port columns in system.peers_v2. 
We want to + * validate our ability to build correct metadata when drawing data from these tables. + */ + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_extract_hosts_using_native_address_port_from_peersv2() + throws UnknownHostException { + + InetAddress expectedAddress = InetAddress.getByName("4.3.2.1"); + int expectedPort = 2409; + PeerRowState state = + PeerRowState.builder() + .peersV2("native_address", expectedAddress) + .peersV2("native_port", expectedPort) + .expectedAddress(expectedAddress) + .expectedPort(expectedPort) + .build(); + runPeerTest(state); + } + + /** DSE 6.8 includes native_transport_address and native_transport_port in system.peers. */ + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_extract_hosts_using_native_transport_address_port_from_peers() + throws UnknownHostException { + + InetAddress expectedAddress = InetAddress.getByName("4.3.2.1"); + int expectedPort = 2409; + PeerRowState state = + PeerRowState.builder() + .peers("native_transport_address", expectedAddress) + .peers("native_transport_port", expectedPort) + .expectedAddress(expectedAddress) + .expectedPort(expectedPort) + .build(); + runPeerTest(state); + } + + /** + * If both native_transport_port and native_transport_port_ssl are present we expect the latter to + * be selected if the Cluster is created with SSL support (i.e. if {@link + * Cluster.Builder#withSSL()} is used). 
+ */ + @Test(groups = "short", enabled = false /* Requires SSL support in scassandra */) + @CCMConfig(createCcm = false) + public void should_extract_hosts_using_native_transport_address_port_ssl_from_peers() + throws UnknownHostException { + + InetAddress expectedAddress = InetAddress.getByName("4.3.2.1"); + int expectedPort = 2409; + PeerRowState state = + PeerRowState.builder() + .peers("native_transport_address", expectedAddress) + .peers("native_transport_port", expectedPort - 100) + .peers("native_transport_port_ssl", expectedPort) + .expectedAddress(expectedAddress) + .expectedPort(expectedPort) + .build(); + runPeerTest(state); + } + + /** + * The default case. If we can't get native_address/port out of system.peers_v2 or + * native_transport_address/port out of system.peers the fall back to rpc_address + a default port + */ + @Test(groups = "short") + @CCMConfig(createCcm = false) + public void should_extract_hosts_using_rpc_address_from_peers() throws UnknownHostException { + + InetAddress expectedAddress = InetAddress.getByName("4.3.2.1"); + PeerRowState state = + PeerRowState.builder() + .peers("rpc_address", expectedAddress) + /* DefaultEndPointFactory isn't happy if we don't have a value for + * both peer and rpc_address */ + .peers("peer", InetAddress.getByName("1.2.3.4")) + .expectedAddress(expectedAddress) + .build(); + runPeerTest(state); + } + + private void runPeerTest(PeerRowState state) { + + ScassandraCluster scassandras = + ScassandraCluster.builder().withNodes(2).withPeersV2(state.usePeersV2()).build(); + scassandras.init(); + + Cluster cluster = null; + try { + + scassandras.node(1).primingClient().clearAllPrimes(); + + PrimingClient primingClient = scassandras.node(1).primingClient(); + + /* Note that we always prime system.local; ControlConnection.refreshNodeAndTokenMap() gets angry + * if this is empty */ + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.local WHERE key='local'") + 
.withThen(then().withColumnTypes(SELECT_LOCAL).withRows(state.getLocalRow()).build()) + .build()); + + if (state.shouldPrimePeers()) { + + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers") + .withThen( + then() + .withColumnTypes(state.isDse68() ? SELECT_PEERS_DSE68 : SELECT_PEERS) + .withRows(state.getPeersRow()) + .build()) + .build()); + } + if (state.shouldPrimePeersV2()) { + + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers_v2") + .withThen( + then().withColumnTypes(SELECT_PEERS_V2).withRows(state.getPeersV2Row()).build()) + .build()); + } else { + + /* Must return an error code in this case in order to trigger the driver's downgrade to system.peers */ + primingClient.prime( + PrimingRequest.queryBuilder() + .withQuery("SELECT * FROM system.peers_v2") + .withThen(then().withResult(Result.invalid).build())); + } + + cluster = + Cluster.builder() + .addContactPoints(scassandras.address(1).getAddress()) + .withPort(scassandras.getBinaryPort()) + .withNettyOptions(nonQuietClusterCloseOptions) + .build(); + cluster.connect(); + + Collection hostEndPoints = + Collections2.transform( + cluster.getMetadata().allHosts(), + new Function() { + public EndPoint apply(Host host) { + return host.getEndPoint(); + } + }); + assertThat(hostEndPoints).contains(state.getExpectedEndPoint(scassandras)); + } finally { + if (cluster != null) cluster.close(); + scassandras.stop(); + } + } + + static class PeerRowState { + + private final ImmutableMap peers; + private final ImmutableMap peersV2; + private final ImmutableMap local; + + private final InetAddress expectedAddress; + private final Optional expectedPort; + + private final boolean shouldPrimePeers; + private final boolean shouldPrimePeersV2; + + private PeerRowState( + ImmutableMap peers, + ImmutableMap peersV2, + ImmutableMap local, + InetAddress expectedAddress, + Optional expectedPort, + boolean shouldPrimePeers, + boolean 
shouldPrimePeersV2) { + this.peers = peers; + this.peersV2 = peersV2; + this.local = local; + + this.expectedAddress = expectedAddress; + this.expectedPort = expectedPort; + + this.shouldPrimePeers = shouldPrimePeers; + this.shouldPrimePeersV2 = shouldPrimePeersV2; + } + + public static Builder builder() { + return new Builder(); + } + + public boolean usePeersV2() { + return !this.peersV2.isEmpty(); + } + + public boolean isDse68() { + return this.peers.containsKey("native_transport_address") + || this.peers.containsKey("native_transport_port") + || this.peers.containsKey("native_transport_port_ssl"); + } + + public boolean shouldPrimePeers() { + return this.shouldPrimePeers; + } + + public boolean shouldPrimePeersV2() { + return this.shouldPrimePeersV2; + } + + public ImmutableMap getPeersRow() { + return this.peers; + } + + public ImmutableMap getPeersV2Row() { + return this.peersV2; + } + + public ImmutableMap getLocalRow() { + return this.local; + } + + public EndPoint getExpectedEndPoint(ScassandraCluster cluster) { + return new TranslatedAddressEndPoint( + new InetSocketAddress( + this.expectedAddress, this.expectedPort.or(cluster.getBinaryPort()))); + } + + static class Builder { + + private ImmutableMap.Builder peers = this.basePeerRow(); + private ImmutableMap.Builder peersV2 = this.basePeerRow(); + private ImmutableMap.Builder local = this.basePeerRow(); + + private InetAddress expectedAddress; + private Optional expectedPort = Optional.absent(); + + private boolean shouldPrimePeers = false; + private boolean shouldPrimePeersV2 = false; + + public PeerRowState build() { + return new PeerRowState( + this.peers.build(), + this.peersV2.build(), + this.local.build(), + this.expectedAddress, + this.expectedPort, + this.shouldPrimePeers, + this.shouldPrimePeersV2); + } + + public Builder peers(String name, Object val) { + this.peers.put(name, val); + this.shouldPrimePeers = true; + return this; + } + + public Builder peersV2(String name, Object val) { + 
this.peersV2.put(name, val); + this.shouldPrimePeersV2 = true; + return this; + } + + public Builder local(String name, Object val) { + this.local.put(name, val); + return this; + } + + public Builder expectedAddress(InetAddress address) { + this.expectedAddress = address; + return this; + } + + public Builder expectedPort(int port) { + this.expectedPort = Optional.of(port); + return this; + } + + private ImmutableMap.Builder basePeerRow() { + return ImmutableMap.builder() + /* Required to support Metadata.addIfAbsent(Host) which is used by host loading code */ + .put("host_id", UUID.randomUUID()) + /* Elements below required to pass peer row validation */ + .put("data_center", datacenter(1)) + .put("rack", "rack1") + .put("tokens", ImmutableSet.of(Long.toString(new Random().nextLong()))); + } + } + } + static class QueryPlanCountingPolicy extends DelegatingLoadBalancingPolicy { final AtomicInteger counter = new AtomicInteger(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerIntegrationTest.java index aa6e8c6bc30..5e1c1bc1561 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/EventDebouncerIntegrationTest.java @@ -47,12 +47,7 @@ public void should_wait_until_load_balancing_policy_is_fully_initialized() throws InterruptedException { TestLoadBalancingPolicy policy = new TestLoadBalancingPolicy(); final Cluster cluster = - register( - createClusterBuilderNoDebouncing() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withLoadBalancingPolicy(policy) - .build()); + register(createClusterBuilderNoDebouncing().withLoadBalancingPolicy(policy).build()); new Thread() { @Override public void run() { diff --git a/driver-core/src/test/java/com/datastax/driver/core/GettableDataIntegrationTest.java 
b/driver-core/src/test/java/com/datastax/driver/core/GettableDataIntegrationTest.java index 5872746cd61..e2a6a8860d0 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/GettableDataIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/GettableDataIntegrationTest.java @@ -58,7 +58,7 @@ public void onTestContextInitialized() { @Override public Cluster.Builder createClusterBuilder() { - return Cluster.builder().withCodecRegistry(registry); + return super.createClusterBuilder().withCodecRegistry(registry); } @BeforeClass(groups = "short") diff --git a/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolTest.java b/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolTest.java index 6f242ae1592..f44e64dca0c 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/HostConnectionPoolTest.java @@ -27,6 +27,7 @@ import static com.google.common.collect.Lists.newArrayList; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; +import static org.mockito.Matchers.anyInt; import static org.mockito.Mockito.after; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; @@ -511,7 +512,7 @@ public void variable_size_pool_should_fill_its_connections_and_then_reject() thr allRequests.add(MockRequest.send(pool)); // Allow time for new connection to be spawned. - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class), anyInt(), anyInt()); assertPoolSize(pool, 2); // Borrow more and ensure the connection returned is a non-core connection. 
@@ -562,7 +563,7 @@ public void should_add_extra_connection_when_core_full() throws Exception { allRequests.add(MockRequest.send(pool)); // Reaching the threshold should have triggered the creation of an extra one - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class), anyInt(), anyInt()); assertPoolSize(pool, 2); } finally { MockRequest.completeAll(allRequests); @@ -795,7 +796,7 @@ public void should_trash_on_returning_connection_with_insufficient_streams() thr allRequests.addAll(requests); allRequests.add(MockRequest.send(pool)); - verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class)); + verify(factory, after(2000).times(1)).open(any(HostConnectionPool.class), anyInt(), anyInt()); assertThat(pool.connections[0]).hasSize(2); // Grab the new non-core connection and replace it with a spy. @@ -1197,16 +1198,11 @@ public void should_not_mark_host_down_if_some_connections_fail_on_init() throws Uninterruptibles.sleepUninterruptibly(reconnectInterval, TimeUnit.MILLISECONDS); - // Should open up to core connections, however it will only spawn up to 1 connection - // per request, so we need to make enough requests to make up the deficit. Additionally - // we need to wait for connections to be established between requests for the pool - // to spawn new connections (since it only allows one simultaneous creation). - for (int i = 5; i <= 8; i++) { - allRequests.add(MockRequest.send(pool)); - verify(factory, timeout(readTimeout)).open(any(HostConnectionPool.class)); - reset(factory); - assertPoolSize(pool, i); - } + // Reconnection mechanism should fill missing connections by now. 
+ verify(factory, timeout(readTimeout).times(4)) + .open(any(HostConnectionPool.class), anyInt(), anyInt()); + reset(factory); + assertPoolSize(pool, 8); } finally { MockRequest.completeAll(allRequests); cluster.close(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/Jdk8SSLEncryptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/Jdk8SSLEncryptionTest.java index 7282fe2e7cb..8066f18affe 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/Jdk8SSLEncryptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/Jdk8SSLEncryptionTest.java @@ -62,11 +62,7 @@ public class Jdk8SSLEncryptionTest extends SSLTestBase { * @jira_ticket JAVA-1364 * @since 3.2.0 */ - @Test( - groups = "short", - dataProvider = "sslImplementation", - dataProviderClass = SSLTestBase.class, - enabled = false /* @IntegrationTestDisabledNettyFailure @IntegrationTestDisabledSSL */) + @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) public void should_pass_peer_address_to_engine(SslImplementation sslImplementation) throws Exception { String expectedPeerHost = TestUtils.IP_PREFIX + "1"; diff --git a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyBootstrapTest.java b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyBootstrapTest.java index 26e319bb28e..09f8bf8edb8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyBootstrapTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyBootstrapTest.java @@ -54,9 +54,11 @@ public void should_init_policy_with_up_contact_points() throws Exception { Cluster cluster = register( - Cluster.builder() + createClusterBuilder() + // Manually add contact points, because + // createClusterBuilder only adds the first + // contact point from getContactPoints(). 
.addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) .withLoadBalancingPolicy(policy) .build()); @@ -103,13 +105,7 @@ public void should_send_down_notifications_after_init_when_contact_points_are_do ccm().waitForDown(nodeToStop); HistoryPolicy policy = new HistoryPolicy(new RoundRobinPolicy()); - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withLoadBalancingPolicy(policy) - .build()); + Cluster cluster = register(createClusterBuilder().withLoadBalancingPolicy(policy).build()); try { cluster.init(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/MetadataTest.java b/driver-core/src/test/java/com/datastax/driver/core/MetadataTest.java index 44774cc59b3..26e88c5d87e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MetadataTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MetadataTest.java @@ -17,7 +17,6 @@ import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; import static com.datastax.driver.core.TestUtils.waitForUp; import com.google.common.collect.Maps; @@ -57,13 +56,7 @@ public class MetadataTest extends CCMTestsSupport { @Test(groups = "long") @CCMConfig(numberOfNodes = 3, dirtiesContext = true, createCluster = false) public void should_update_metadata_on_topology_change() { - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()) - .build()); + Cluster cluster = register(createClusterBuilderNoDebouncing().build()); Session session = cluster.connect(); String keyspace = "test"; diff --git a/driver-core/src/test/java/com/datastax/driver/core/MetricsTest.java b/driver-core/src/test/java/com/datastax/driver/core/MetricsTest.java index 
ead5fd52cd9..2c97550a7c9 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MetricsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MetricsTest.java @@ -36,7 +36,7 @@ public class MetricsTest extends CCMTestsSupport { @Override public Cluster.Builder createClusterBuilder() { - return Cluster.builder() + return super.createClusterBuilder() .withRetryPolicy( new RetryPolicy() { @Override @@ -141,13 +141,7 @@ public void should_enable_metrics_and_jmx_by_default() throws Exception { */ @Test(groups = "short", expectedExceptions = InstanceNotFoundException.class) public void metrics_should_be_null_when_metrics_disabled() throws Exception { - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withoutMetrics() - .build()); + Cluster cluster = register(createClusterBuilder().withoutMetrics().build()); try { cluster.init(); assertThat(cluster.getMetrics()).isNull(); @@ -169,13 +163,7 @@ public void metrics_should_be_null_when_metrics_disabled() throws Exception { */ @Test(groups = "short", expectedExceptions = InstanceNotFoundException.class) public void should_be_no_jmx_mbean_when_jmx_is_disabled() throws Exception { - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withoutJMXReporting() - .build()); + Cluster cluster = register(createClusterBuilder().withoutJMXReporting().build()); try { cluster.init(); assertThat(cluster.getMetrics()).isNotNull(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/MissingRpcAddressTest.java b/driver-core/src/test/java/com/datastax/driver/core/MissingRpcAddressTest.java index 8408bb701c4..f778ca94394 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/MissingRpcAddressTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/MissingRpcAddressTest.java @@ -40,12 +40,7 @@ public class MissingRpcAddressTest extends 
CCMTestsSupport { public void testMissingRpcAddressAtStartup() throws Exception { deleteNode2RpcAddressFromNode1(); // Use only one contact point to make sure that the control connection is on node1 - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster = register(createClusterBuilder().build()); cluster.connect(); // Since node2's RPC address is unknown on our control host, it should have been ignored @@ -58,9 +53,7 @@ private void deleteNode2RpcAddressFromNode1() throws Exception { InetSocketAddress firstHost = ccm().addressOfNode(1); Cluster cluster = register( - Cluster.builder() - .addContactPoints(firstHost.getAddress()) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() // ensure we will only connect to node1 .withLoadBalancingPolicy( new WhiteListPolicy( diff --git a/driver-core/src/test/java/com/datastax/driver/core/NettyOptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/NettyOptionsTest.java index 26bb2c3cf0e..5c7fc1fc4de 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/NettyOptionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/NettyOptionsTest.java @@ -54,9 +54,7 @@ public void should_invoke_netty_options_hooks_single_node() throws Exception { } @CCMConfig(numberOfNodes = 3) - @Test( - groups = "short", - enabled = false /* @IntegrationTestDisabledNettyFailure @IntegrationTestDisabledSSL */) + @Test(groups = "short") public void should_invoke_netty_options_hooks_multi_node() throws Exception { should_invoke_netty_options_hooks(3, 4); } @@ -81,9 +79,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable { .afterChannelInitialized(any(SocketChannel.class)); Cluster cluster = register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withPoolingOptions( new PoolingOptions() 
.setConnectionsPerHost( @@ -96,7 +92,7 @@ public Object answer(InvocationOnMock invocation) throws Throwable { int expectedNumberOfCalls = TestUtils.numberOfLocalCoreConnections(cluster) * hosts + 1; // If the driver supports a more recent protocol version than C*, the negotiation at startup // will open an additional connection for each protocol version tried. - ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED; + ProtocolVersion version = ProtocolVersion.DEFAULT; ProtocolVersion usedVersion = ccm().getProtocolVersion(); while (version != usedVersion && version != null) { version = version.getLowerSupported(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/NodeListRefreshDebouncerTest.java b/driver-core/src/test/java/com/datastax/driver/core/NodeListRefreshDebouncerTest.java index b7445259f1a..aba127ecbf8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/NodeListRefreshDebouncerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/NodeListRefreshDebouncerTest.java @@ -42,13 +42,7 @@ public void setup() { queryOptions.setMaxPendingRefreshNodeListRequests(5); queryOptions.setRefreshSchemaIntervalMillis(0); // Create a separate cluster that will receive the schema events on its control connection. 
- cluster2 = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(queryOptions) - .build()); + cluster2 = register(createClusterBuilder().withQueryOptions(queryOptions).build()); cluster2.init(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/NodeRefreshDebouncerTest.java b/driver-core/src/test/java/com/datastax/driver/core/NodeRefreshDebouncerTest.java index 6b2c1a87680..c59078d0aaf 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/NodeRefreshDebouncerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/NodeRefreshDebouncerTest.java @@ -45,13 +45,7 @@ public void should_call_onAdd_with_bootstrap_stop_start() { int refreshNodeInterval = 30000; QueryOptions queryOptions = new QueryOptions().setRefreshNodeIntervalMillis(refreshNodeInterval); - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(queryOptions) - .build()); + Cluster cluster = register(createClusterBuilder().withQueryOptions(queryOptions).build()); cluster.connect(); Host.StateListener listener = mock(Host.StateListener.class); cluster.register(listener); diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementInvalidationTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementInvalidationTest.java index 7a72fc73fbf..86bc7c49648 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementInvalidationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementInvalidationTest.java @@ -40,23 +40,9 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; -/** - * Note: at the time of writing, this test exercises features of an unreleased Cassandra version. To - * test against a local build, run with - * - *

- *   -Dcassandra.version=4.0.0 -Dcassandra.directory=/path/to/cassandra
- * 
- */ @CassandraVersion("4.0") public class PreparedStatementInvalidationTest extends CCMTestsSupport { - @Override - public Cluster.Builder createClusterBuilder() { - // TODO remove when protocol v5 is stable in C* 4 - return super.createClusterBuilderNoDebouncing().allowBetaProtocolVersion(); - } - @BeforeMethod(groups = "short", alwaysRun = true) public void setup() throws Exception { execute("CREATE TABLE prepared_statement_invalidation_test (a int PRIMARY KEY, b int, c int);"); diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java index b4a9275787a..168b86730f3 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java @@ -532,8 +532,7 @@ public void should_set_routing_key_on_case_insensitive_keyspace_and_table() { PreparedStatement ps = session().prepare(String.format("INSERT INTO %s.foo (i) VALUES (?)", keyspace)); BoundStatement bs = ps.bind(1); - assertThat(bs.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)) - .isNotNull(); + assertThat(bs.getRoutingKey(protocolVersion, CodecRegistry.DEFAULT_INSTANCE)).isNotNull(); } @Test(groups = "short") @@ -548,17 +547,12 @@ public void should_set_routing_key_on_case_sensitive_keyspace_and_table() { PreparedStatement ps = session().prepare("INSERT INTO \"Test\".\"Foo\" (i) VALUES (?)"); BoundStatement bs = ps.bind(1); - assertThat(bs.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE)) - .isNotNull(); + assertThat(bs.getRoutingKey(protocolVersion, CodecRegistry.DEFAULT_INSTANCE)).isNotNull(); } @Test(groups = "short", expectedExceptions = InvalidQueryException.class) public void should_fail_when_prepared_on_another_cluster() throws Exception { - Cluster otherCluster = - Cluster.builder() - .addContactPoints(getContactPoints()) - 
.withPort(ccm().getBinaryPort()) - .build(); + Cluster otherCluster = createClusterBuilder().build(); try { PreparedStatement pst = otherCluster.connect().prepare("select * from system.peers where inet = ?"); @@ -585,9 +579,7 @@ public void should_fail_when_prepared_on_another_cluster() throws Exception { public void should_not_allow_unbound_value_on_bound_statement_when_protocol_lesser_than_v4() { Cluster cluster = register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withProtocolVersion(ccm().getProtocolVersion(ProtocolVersion.V3)) .build()); Session session = cluster.connect(); @@ -600,6 +592,9 @@ public void should_not_allow_unbound_value_on_bound_statement_when_protocol_less fail("Should not have executed statement with UNSET values in protocol V3"); } catch (IllegalStateException e) { assertThat(e.getMessage()).contains("Unset value at index 1"); + } finally { + session.close(); + cluster.close(); } } @@ -617,9 +612,7 @@ public void should_not_allow_unbound_value_on_bound_statement_when_protocol_less public void should_not_allow_unbound_value_on_batch_statement_when_protocol_lesser_than_v4() { Cluster cluster = register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withProtocolVersion(ccm().getProtocolVersion(ProtocolVersion.V3)) .build()); Session session = cluster.connect(); @@ -633,6 +626,9 @@ public void should_not_allow_unbound_value_on_batch_statement_when_protocol_less fail("Should not have executed statement with UNSET values in protocol V3"); } catch (IllegalStateException e) { assertThat(e.getMessage()).contains("Unset value at index 1"); + } finally { + session.close(); + cluster.close(); } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ProtocolBetaVersionTest.java b/driver-core/src/test/java/com/datastax/driver/core/ProtocolBetaVersionTest.java index f003568fde0..f230ddbf607 
100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ProtocolBetaVersionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ProtocolBetaVersionTest.java @@ -22,17 +22,28 @@ package com.datastax.driver.core; import static com.datastax.driver.core.ProtocolVersion.V4; -import static com.datastax.driver.core.ProtocolVersion.V5; +import static com.datastax.driver.core.ProtocolVersion.V6; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; import com.datastax.driver.core.utils.CassandraVersion; +import org.testng.SkipException; +import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; /** Tests for the new USE_BETA flag introduced in protocol v5 and Cassandra 3.10. */ @CassandraVersion("3.10") +@CCMConfig(createCluster = false) public class ProtocolBetaVersionTest extends CCMTestsSupport { + @BeforeClass + public void checkNotCassandra4OrHigher() { + if (ccm().getCassandraVersion().getMajor() > 3) { + throw new SkipException( + "ProtocolBetaVersionTest should only be executed against C* versions >= 3.10 and < 4.0"); + } + } + /** * Verifies that the cluster builder fails when version is explicitly set and user attempts to set * beta flag. 
@@ -40,8 +51,7 @@ public class ProtocolBetaVersionTest extends CCMTestsSupport { * @jira_ticket JAVA-1248 */ @Test(groups = "short") - public void should_not_initialize_when_version_explicitly_required_and_beta_flag_is_set() - throws Exception { + public void should_not_initialize_when_version_explicitly_required_and_beta_flag_is_set() { try { Cluster.builder() .addContactPoints(getContactPoints()) @@ -63,8 +73,7 @@ public void should_not_initialize_when_version_explicitly_required_and_beta_flag * @jira_ticket JAVA-1248 */ @Test(groups = "short") - public void should_not_initialize_when_beta_flag_is_set_and_version_explicitly_required() - throws Exception { + public void should_not_initialize_when_beta_flag_is_set_and_version_explicitly_required() { try { Cluster.builder() .addContactPoints(getContactPoints()) @@ -81,23 +90,22 @@ public void should_not_initialize_when_beta_flag_is_set_and_version_explicitly_r /** * Verifies that the driver CANNOT connect to 3.10 with the following combination of options: - * Version V5 Flag UNSET + * Version V6 Flag UNSET * * @jira_ticket JAVA-1248 */ @Test(groups = "short") - public void should_not_connect_when_beta_version_explicitly_required_and_flag_not_set() - throws Exception { + public void should_not_connect_when_beta_version_explicitly_required_and_flag_not_set() { try { Cluster.builder() .addContactPoints(getContactPoints()) .withPort(ccm().getBinaryPort()) - .withProtocolVersion(V5) + .withProtocolVersion(V6) .build(); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage()) - .startsWith("Can not use V5 protocol version. Newest supported protocol version is: V4"); + .startsWith("Can not use V6 protocol version. 
Newest supported protocol version is: V5"); } } @@ -107,12 +115,8 @@ public void should_not_connect_when_beta_version_explicitly_required_and_flag_no * * @jira_ticket JAVA-1248 */ - @Test(groups = "short", enabled = false /* @IntegrationTestDisabledCassandra3Failure */) - public void should_connect_with_beta_when_no_version_explicitly_required_and_flag_set() - throws Exception { - // Note: when the driver's ProtocolVersion.NEWEST_SUPPORTED will be incremented to V6 or higher - // a renegotiation will start taking place here and will downgrade the version from V6 to V5, - // but the test should remain valid since it's executed against 3.10 exclusively + @Test(groups = "short", enabled = false) + public void should_connect_with_beta_when_no_version_explicitly_required_and_flag_set() { Cluster cluster = Cluster.builder() .addContactPoints(getContactPoints()) @@ -120,22 +124,22 @@ public void should_connect_with_beta_when_no_version_explicitly_required_and_fla .allowBetaProtocolVersion() .build(); cluster.connect(); - assertThat(cluster.getConfiguration().getProtocolOptions().getProtocolVersion()).isEqualTo(V5); + assertThat(cluster.getConfiguration().getProtocolOptions().getProtocolVersion()).isEqualTo(V6); } /** * Verifies that the driver can connect to 3.10 with the following combination of options: Version * UNSET Flag UNSET Expected version: V4 * + *

This test has been disabled as of driver 3.11 because v5 is not beta anymore in the driver. + * As a consequence, protocol negotiation without specifying an initial version is not possible + * anymore against C* >= 3.10 and < 4.0. + * * @jira_ticket JAVA-1248 */ - @Test(groups = "short") + @Test(groups = "short", enabled = false) public void - should_connect_after_renegotiation_when_no_version_explicitly_required_and_flag_not_set() - throws Exception { - // Note: when the driver's ProtocolVersion.NEWEST_SUPPORTED will be incremented to V6 or higher - // the renegotiation will start downgrading the version from V6 to V4 instead of V5 to V4, - // but the test should remain valid since it's executed against 3.10 exclusively + should_connect_after_renegotiation_when_no_version_explicitly_required_and_flag_not_set() { Cluster cluster = Cluster.builder() .addContactPoints(getContactPoints()) diff --git a/driver-core/src/test/java/com/datastax/driver/core/ProtocolVersionRenegotiationTest.java b/driver-core/src/test/java/com/datastax/driver/core/ProtocolVersionRenegotiationTest.java index 7c5263b39c6..5de374b66d7 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ProtocolVersionRenegotiationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ProtocolVersionRenegotiationTest.java @@ -22,8 +22,8 @@ package com.datastax.driver.core; import static com.datastax.driver.core.ProtocolVersion.V1; -import static com.datastax.driver.core.ProtocolVersion.V4; import static com.datastax.driver.core.ProtocolVersion.V5; +import static com.datastax.driver.core.ProtocolVersion.V6; import static org.assertj.core.api.Assertions.assertThat; import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException; @@ -32,6 +32,7 @@ import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; +@CCMConfig(createCluster = false) public class ProtocolVersionRenegotiationTest extends CCMTestsSupport { private ProtocolVersion protocolVersion; @@ 
-43,7 +44,7 @@ public void setUp() { /** @jira_ticket JAVA-1367 */ @Test(groups = "short") - public void should_succeed_when_version_provided_and_matches() throws Exception { + public void should_succeed_when_version_provided_and_matches() { Cluster cluster = connectWithVersion(protocolVersion); assertThat(actualProtocolVersion(cluster)).isEqualTo(protocolVersion); } @@ -51,7 +52,7 @@ public void should_succeed_when_version_provided_and_matches() throws Exception /** @jira_ticket JAVA-1367 */ @Test(groups = "short", enabled = false /* @IntegrationTestDisabledCassandra3Failure */) @CassandraVersion("3.8") - public void should_fail_when_version_provided_and_too_low_3_8_plus() throws Exception { + public void should_fail_when_version_provided_and_too_low_3_8_plus() { UnsupportedProtocolVersionException e = connectWithUnsupportedVersion(V1); assertThat(e.getUnsupportedVersion()).isEqualTo(V1); // post-CASSANDRA-11464: server replies with client's version @@ -60,30 +61,31 @@ public void should_fail_when_version_provided_and_too_low_3_8_plus() throws Exce /** @jira_ticket JAVA-1367 */ @Test(groups = "short") - public void should_fail_when_version_provided_and_too_high() throws Exception { - if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("2.2")) >= 0) { - throw new SkipException("Server supports protocol V4"); + public void should_fail_when_version_provided_and_too_high() { + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("3.10")) >= 0) { + throw new SkipException("Server supports protocol V5"); } - UnsupportedProtocolVersionException e = connectWithUnsupportedVersion(V4); - assertThat(e.getUnsupportedVersion()).isEqualTo(V4); - // pre-CASSANDRA-11464: server replies with its own version - assertThat(e.getServerVersion()).isEqualTo(protocolVersion); + UnsupportedProtocolVersionException e = connectWithUnsupportedVersion(V5); + assertThat(e.getUnsupportedVersion()).isEqualTo(V5); + // see CASSANDRA-11464: for C* < 3.0.9 and 3.8, server replies 
with its own version; + // otherwise it replies with the client's version. + assertThat(e.getServerVersion()).isIn(V5, protocolVersion); } /** @jira_ticket JAVA-1367 */ @Test(groups = "short") - public void should_fail_when_beta_allowed_and_too_high() throws Exception { - if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("3.10")) >= 0) { - throw new SkipException("Server supports protocol protocol V5 beta"); + public void should_fail_when_beta_allowed_and_too_high() { + if (ccm().getCassandraVersion().compareTo(VersionNumber.parse("4.0.0")) >= 0) { + throw new SkipException("Server supports protocol protocol V6 beta"); } UnsupportedProtocolVersionException e = connectWithUnsupportedBetaVersion(); - assertThat(e.getUnsupportedVersion()).isEqualTo(V5); + assertThat(e.getUnsupportedVersion()).isEqualTo(V6); } /** @jira_ticket JAVA-1367 */ @Test(groups = "short", enabled = false /* @IntegrationTestDisabledCassandra3Failure */) @CCMConfig(version = "2.1.16", createCluster = false) - public void should_negotiate_when_no_version_provided() throws Exception { + public void should_negotiate_when_no_version_provided() { if (protocolVersion.compareTo(ProtocolVersion.NEWEST_SUPPORTED) >= 0) { throw new SkipException("Server supports newest protocol version driver supports"); } @@ -91,6 +93,30 @@ public void should_negotiate_when_no_version_provided() throws Exception { assertThat(actualProtocolVersion(cluster)).isEqualTo(protocolVersion); } + @Test(groups = "short") + public void should_successfully_negotiate_down_from_newest_supported_version() { + // By default, the driver will connect with ProtocolVersion.DEFAULT (<= + // ProtocolVersion.NEWEST_SUPPORTED). + // This test verifies that the driver can connect starting with the + // newest supported version, potentially renegotiating the protocol + // version to a lower version. + + // We will explicitly set a protocol version, so we need to force + // the driver to negotiate protocol version. 
+ Cluster.shouldAlwaysNegotiateProtocolVersion = true; + + try { + Cluster cluster = connectWithVersion(ProtocolVersion.NEWEST_SUPPORTED); + assertThat(actualProtocolVersion(cluster)) + .isLessThanOrEqualTo(ProtocolVersion.NEWEST_SUPPORTED); + } catch (RuntimeException e) { + Cluster.shouldAlwaysNegotiateProtocolVersion = false; + throw e; + } + + Cluster.shouldAlwaysNegotiateProtocolVersion = false; + } + private UnsupportedProtocolVersionException connectWithUnsupportedVersion( ProtocolVersion version) { Cluster cluster = diff --git a/driver-core/src/test/java/com/datastax/driver/core/QueryTimestampTest.java b/driver-core/src/test/java/com/datastax/driver/core/QueryTimestampTest.java index e4beebb14c1..f6ad7e08f78 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/QueryTimestampTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/QueryTimestampTest.java @@ -39,7 +39,7 @@ public void onTestContextInitialized() { public Cluster.Builder createClusterBuilder() { @SuppressWarnings("deprecation") Builder builder = - Cluster.builder() + super.createClusterBuilder() .withTimestampGenerator( new TimestampGenerator() { @Override diff --git a/driver-core/src/test/java/com/datastax/driver/core/RecommissionedNodeTest.java b/driver-core/src/test/java/com/datastax/driver/core/RecommissionedNodeTest.java index 31dafba37b4..3e9907c57aa 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/RecommissionedNodeTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/RecommissionedNodeTest.java @@ -55,9 +55,8 @@ public void should_ignore_recommissioned_node_on_reconnection_attempt() throws E // Now start the driver that will connect to node2 and node3, and consider node1 down mainCluster = - Cluster.builder() - .addContactPoints(mainCcm.addressOfNode(2).getAddress()) - .withPort(mainCcm.getBinaryPort()) + TestUtils.configureClusterBuilder( + Cluster.builder(), mainCcm, mainCcm.addressOfNode(2).getAddress()) 
.withQueryOptions(nonDebouncingQueryOptions()) .build(); mainCluster.connect(); @@ -89,9 +88,8 @@ public void should_ignore_recommissioned_node_on_control_connection_reconnect() // Start the driver, the control connection will be on node2 mainCluster = - Cluster.builder() - .addContactPoints(mainCcm.addressOfNode(2).getAddress()) - .withPort(mainCcm.getBinaryPort()) + TestUtils.configureClusterBuilder( + Cluster.builder(), mainCcm, mainCcm.addressOfNode(2).getAddress()) .withQueryOptions(nonDebouncingQueryOptions()) .build(); mainCluster.connect(); @@ -134,9 +132,8 @@ public void should_ignore_recommissioned_node_on_session_init() throws Exception // Start the driver, it should only connect to node 2 mainCluster = - Cluster.builder() - .addContactPoints(mainCcm.addressOfNode(2).getAddress()) - .withPort(mainCcm.getBinaryPort()) + TestUtils.configureClusterBuilder( + Cluster.builder(), mainCcm, mainCcm.addressOfNode(2).getAddress()) .withQueryOptions(nonDebouncingQueryOptions()) .build(); @@ -181,9 +178,8 @@ public void should_ignore_node_that_does_not_support_protocol_version_on_session // Start the driver, it should only connect to node 2 mainCluster = - Cluster.builder() - .addContactPoints(mainCcm.addressOfNode(2).getAddress()) - .withPort(mainCcm.getBinaryPort()) + TestUtils.configureClusterBuilder( + Cluster.builder(), mainCcm, mainCcm.addressOfNode(2).getAddress()) .withQueryOptions(nonDebouncingQueryOptions()) .build(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/ReconnectionTest.java b/driver-core/src/test/java/com/datastax/driver/core/ReconnectionTest.java index 24b254e0235..96ccc44122a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ReconnectionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ReconnectionTest.java @@ -57,9 +57,7 @@ public class ReconnectionTest extends CCMTestsSupport { public void should_reconnect_after_full_connectivity_loss() throws InterruptedException { Cluster cluster = register( 
- Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withReconnectionPolicy(new ConstantReconnectionPolicy(reconnectionDelayMillis)) .build()); cluster.connect(); @@ -100,9 +98,7 @@ public void should_keep_reconnecting_on_authentication_error() throws Interrupte CountingAuthProvider authProvider = new CountingAuthProvider("cassandra", "cassandra"); Cluster cluster = register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() // Start with the correct auth so that we can initialize the server .withAuthProvider(authProvider) .withReconnectionPolicy(reconnectionPolicy) @@ -149,12 +145,7 @@ public void should_cancel_reconnection_attempts() throws InterruptedException { new CountingReconnectionPolicy(new ConstantReconnectionPolicy(reconnectionDelayMillis)); Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withReconnectionPolicy(reconnectionPolicy) - .build()); + register(createClusterBuilder().withReconnectionPolicy(reconnectionPolicy).build()); cluster.connect(); // Stop a node and cancel the reconnection attempts to it @@ -181,9 +172,7 @@ public void should_trigger_one_time_reconnect() throws InterruptedException, IOE TogglabePolicy loadBalancingPolicy = new TogglabePolicy(new RoundRobinPolicy()); Cluster cluster = register( - Cluster.builder() - .addContactPointsWithPorts(ccm().addressOfNode(1)) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withLoadBalancingPolicy(loadBalancingPolicy) .withReconnectionPolicy(new ConstantReconnectionPolicy(reconnectionDelayMillis)) .build()); @@ -238,9 +227,7 @@ public void should_use_connection_from_reconnection_in_pool() { SocketOptions socketOptions = spy(new SocketOptions()); Cluster cluster = register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - 
.withPort(ccm().getBinaryPort()) + createClusterBuilder() .withReconnectionPolicy(new ConstantReconnectionPolicy(5000)) .withLoadBalancingPolicy(loadBalancingPolicy) .withSocketOptions(socketOptions) diff --git a/driver-core/src/test/java/com/datastax/driver/core/RefreshConnectedHostTest.java b/driver-core/src/test/java/com/datastax/driver/core/RefreshConnectedHostTest.java index 4fe2a333037..52ec02163be 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/RefreshConnectedHostTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/RefreshConnectedHostTest.java @@ -40,13 +40,10 @@ public void should_refresh_single_connected_host() { PoolingOptions poolingOptions = Mockito.spy(new PoolingOptions()); Cluster cluster = register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + createClusterBuilderNoDebouncing() .withPoolingOptions(poolingOptions) .withLoadBalancingPolicy(loadBalancingPolicy) .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)) - .withQueryOptions(TestUtils.nonDebouncingQueryOptions()) .build()); Session session = cluster.connect(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SSLAuthenticatedEncryptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SSLAuthenticatedEncryptionTest.java index 3689d51d134..de8c400dba9 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SSLAuthenticatedEncryptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SSLAuthenticatedEncryptionTest.java @@ -40,11 +40,7 @@ public class SSLAuthenticatedEncryptionTest extends SSLTestBase { * @expected_result Connection can be established to a cassandra node using SSL that requires * client auth. 
*/ - @Test( - groups = "short", - dataProvider = "sslImplementation", - dataProviderClass = SSLTestBase.class, - enabled = false /* @IntegrationTestDisabledNettyFailure @IntegrationTestDisabledSSL */) + @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) public void should_connect_with_ssl_with_client_auth_and_node_requires_auth( SslImplementation sslImplementation) throws Exception { connectWithSSLOptions(getSSLOptions(sslImplementation, true, true)); @@ -62,8 +58,7 @@ public void should_connect_with_ssl_with_client_auth_and_node_requires_auth( groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class, - expectedExceptions = {NoHostAvailableException.class}, - enabled = false /* @IntegrationTestDisabledNettyFailure @IntegrationTestDisabledSSL */) + expectedExceptions = {NoHostAvailableException.class}) public void should_not_connect_without_client_auth_but_node_requires_auth( SslImplementation sslImplementation) throws Exception { connectWithSSLOptions(getSSLOptions(sslImplementation, false, true)); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SSLEncryptionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SSLEncryptionTest.java index be4bd80e235..cd5b40d549d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SSLEncryptionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SSLEncryptionTest.java @@ -41,11 +41,7 @@ public class SSLEncryptionTest extends SSLTestBase { * @test_category connection:ssl * @expected_result Connection can be established to a cassandra node using SSL. 
*/ - @Test( - groups = "short", - dataProvider = "sslImplementation", - dataProviderClass = SSLTestBase.class, - enabled = false /* @IntegrationTestDisabledNettyFailure @IntegrationTestDisabledSSL */) + @Test(groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class) public void should_connect_with_ssl_without_client_auth_and_node_doesnt_require_auth( SslImplementation sslImplementation) throws Exception { connectWithSSLOptions(getSSLOptions(sslImplementation, false, true)); @@ -63,8 +59,7 @@ public void should_connect_with_ssl_without_client_auth_and_node_doesnt_require_ groups = "short", dataProvider = "sslImplementation", dataProviderClass = SSLTestBase.class, - expectedExceptions = {NoHostAvailableException.class}, - enabled = false /* @IntegrationTestDisabledNettyFailure @IntegrationTestDisabledSSL */) + expectedExceptions = {NoHostAvailableException.class}) public void should_not_connect_with_ssl_without_trusting_server_cert( SslImplementation sslImplementation) throws Exception { connectWithSSLOptions(getSSLOptions(sslImplementation, false, false)); @@ -87,12 +82,7 @@ public void should_not_connect_with_ssl_without_trusting_server_cert( groups = "short", expectedExceptions = {NoHostAvailableException.class}) public void should_not_connect_without_ssl_but_node_uses_ssl() throws Exception { - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster = register(createClusterBuilder().build()); cluster.connect(); } @@ -110,11 +100,7 @@ public void should_reconnect_with_ssl_on_node_up(SslImplementation sslImplementa throws Exception { Cluster cluster = register( - Cluster.builder() - .addContactPoints(this.getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withSSL(getSSLOptions(sslImplementation, true, true)) - .build()); + createClusterBuilder().withSSL(getSSLOptions(sslImplementation, true, true)).build()); 
cluster.connect(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SSLTestBase.java b/driver-core/src/test/java/com/datastax/driver/core/SSLTestBase.java index 786b388dc6d..e4b91b3abbf 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SSLTestBase.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SSLTestBase.java @@ -26,7 +26,6 @@ import static io.netty.handler.ssl.SslProvider.OPENSSL; import static org.assertj.core.api.Assertions.fail; -import com.datastax.driver.core.utils.ScyllaSkip; import io.netty.handler.ssl.SslContextBuilder; import java.security.KeyStore; import java.security.SecureRandom; @@ -35,7 +34,6 @@ import javax.net.ssl.TrustManagerFactory; import org.testng.annotations.DataProvider; -@ScyllaSkip /* @IntegrationTestDisabledScyllaUnsupportedFunctionality @IntegrationTestDisabledSSL */ @CCMConfig(ssl = true, createCluster = false) public abstract class SSLTestBase extends CCMTestsSupport { @@ -59,13 +57,7 @@ public static Object[][] sslImplementation() { * be raised here if connection cannot be established. */ protected void connectWithSSLOptions(SSLOptions sslOptions) throws Exception { - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withSSL(sslOptions) - .build()); + Cluster cluster = register(createClusterBuilder().withSSL(sslOptions).build()); cluster.connect(); } @@ -77,13 +69,7 @@ protected void connectWithSSLOptions(SSLOptions sslOptions) throws Exception { * be raised here if connection cannot be established. 
*/ protected void connectWithSSL() throws Exception { - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withSSL() - .build()); + Cluster cluster = register(createClusterBuilder().withSSL().build()); cluster.connect(); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/ScassandraCluster.java b/driver-core/src/test/java/com/datastax/driver/core/ScassandraCluster.java index e281720f8a5..62bd2ece358 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ScassandraCluster.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ScassandraCluster.java @@ -619,6 +619,22 @@ private Object getPeerInfo(int dc, int node, String property, Object defaultValu column("schema_version", UUID) }; + /* system.peers was re-worked for DSE 6.8 */ + public static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_PEERS_DSE68 = { + column("peer", INET), + column("rpc_address", INET), + column("data_center", TEXT), + column("rack", TEXT), + column("release_version", TEXT), + column("tokens", set(TEXT)), + column("host_id", UUID), + column("graph", BOOLEAN), + column("schema_version", UUID), + column("native_transport_address", INET), + column("native_transport_port", INT), + column("native_transport_port_ssl", INT) + }; + public static final org.scassandra.http.client.types.ColumnMetadata[] SELECT_PEERS_V2 = { column("peer", INET), column("peer_port", INT), diff --git a/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesCCTest.java b/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesCCTest.java index 972f668eba5..2340b45678b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesCCTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesCCTest.java @@ -58,22 +58,10 @@ public class SchemaChangesCCTest extends CCMTestsSupport { public void 
should_receive_changes_made_while_control_connection_is_down_on_reconnect() throws Exception { ToggleablePolicy lbPolicy = new ToggleablePolicy(Policies.defaultLoadBalancingPolicy()); - Cluster cluster = - register( - Cluster.builder() - .withLoadBalancingPolicy(lbPolicy) - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster = register(createClusterBuilder().withLoadBalancingPolicy(lbPolicy).build()); // Put cluster2 control connection on node 2 so it doesn't go down (to prevent noise for // debugging). - Cluster cluster2 = - register( - Cluster.builder() - .withLoadBalancingPolicy(lbPolicy) - .addContactPoints(getContactPoints().get(1)) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster2 = register(createClusterBuilder().withLoadBalancingPolicy(lbPolicy).build()); SchemaChangeListener listener = mock(SchemaChangeListener.class); cluster.init(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesTest.java b/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesTest.java index edf731c7629..9e4355a0313 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SchemaChangesTest.java @@ -89,23 +89,11 @@ public class SchemaChangesTest extends CCMTestsSupport { @BeforeClass(groups = "short") public void setup() throws InterruptedException { - cluster1 = - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()) - .build(); - cluster2 = - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(nonDebouncingQueryOptions()) - .build(); + cluster1 = createClusterBuilderNoDebouncing().build(); + cluster2 = createClusterBuilderNoDebouncing().build(); schemaDisabledCluster = spy( - Cluster.builder() - .addContactPoints(getContactPoints()) - 
.withPort(ccm().getBinaryPort()) + createClusterBuilder() .withClusterName("schema-disabled") .withQueryOptions(nonDebouncingQueryOptions().setMetadataEnabled(false)) .build()); @@ -671,9 +659,7 @@ public void should_notify_of_keyspace_drop(String keyspace) throws InterruptedEx @Test(groups = "short", expectedExceptions = IllegalStateException.class) public void should_throw_illegal_state_exception_on_newToken_with_metadata_disabled() { Cluster cluster = - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withQueryOptions(nonDebouncingQueryOptions().setMetadataEnabled(false)) .build(); @@ -695,9 +681,7 @@ public void should_throw_illegal_state_exception_on_newToken_with_metadata_disab @Test(groups = "short", expectedExceptions = IllegalStateException.class) public void should_throw_illegal_state_exception_on_newTokenRange_with_metadata_disabled() { Cluster cluster = - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withQueryOptions(nonDebouncingQueryOptions().setMetadataEnabled(false)) .build(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SchemaRefreshDebouncerTest.java b/driver-core/src/test/java/com/datastax/driver/core/SchemaRefreshDebouncerTest.java index 65c3a727f13..ca091f7f33a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SchemaRefreshDebouncerTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SchemaRefreshDebouncerTest.java @@ -59,13 +59,7 @@ public void setup() { queryOptions.setRefreshSchemaIntervalMillis(DEBOUNCE_TIME); queryOptions.setMaxPendingRefreshSchemaRequests(5); // Create a separate cluster that will receive the schema events on its control connection. 
- cluster2 = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withQueryOptions(queryOptions) - .build()); + cluster2 = register(createClusterBuilder().withQueryOptions(queryOptions).build()); session2 = cluster2.connect(); // Create a spy of the Cluster's control connection and replace it with the spy. diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionLeakTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionLeakTest.java index cc6940575af..aa0f5737954 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionLeakTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionLeakTest.java @@ -17,7 +17,6 @@ import static com.datastax.driver.core.Assertions.assertThat; import static com.datastax.driver.core.CreateCCM.TestMode.PER_METHOD; -import static com.datastax.driver.core.TestUtils.nonDebouncingQueryOptions; import static com.google.common.collect.Lists.newArrayList; import static java.util.concurrent.TimeUnit.MINUTES; import static org.assertj.core.api.Assertions.fail; @@ -40,11 +39,8 @@ public void connectionLeakTest() throws Exception { channelMonitor.reportAtFixedInterval(1, TimeUnit.SECONDS); Cluster cluster = register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) + createClusterBuilderNoDebouncing() .withNettyOptions(channelMonitor.nettyOptions()) - .withQueryOptions(nonDebouncingQueryOptions()) .build()); cluster.init(); @@ -102,12 +98,7 @@ public void should_not_leak_session_when_wrong_keyspace() throws Exception { channelMonitor = new SocketChannelMonitor(); channelMonitor.reportAtFixedInterval(1, TimeUnit.SECONDS); Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .withNettyOptions(channelMonitor.nettyOptions()) - .build()); + 
register(createClusterBuilder().withNettyOptions(channelMonitor.nettyOptions()).build()); cluster.init(); assertThat(cluster.manager.sessions.size()).isEqualTo(0); try { diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionStressTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionStressTest.java index b80ba5e2182..25fde7d5df2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionStressTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionStressTest.java @@ -99,9 +99,7 @@ public void sessions_should_not_leak_connections() { // override inherited field with a new cluster object and ensure 0 sessions and connections channelMonitor.reportAtFixedInterval(1, TimeUnit.SECONDS); stressCluster = - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) + createClusterBuilder() .withPoolingOptions( new PoolingOptions().setCoreConnectionsPerHost(HostDistance.LOCAL, 1)) .withNettyOptions(channelMonitor.nettyOptions()) diff --git a/driver-core/src/test/java/com/datastax/driver/core/StatementWrapperTest.java b/driver-core/src/test/java/com/datastax/driver/core/StatementWrapperTest.java index 82416c567f4..06d539bb606 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/StatementWrapperTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/StatementWrapperTest.java @@ -50,7 +50,7 @@ public void onTestContextInitialized() { @Override public Cluster.Builder createClusterBuilder() { - return Cluster.builder() + return super.createClusterBuilder() .withLoadBalancingPolicy(loadBalancingPolicy) .withSpeculativeExecutionPolicy(speculativeExecutionPolicy) .withRetryPolicy(retryPolicy); diff --git a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java index 6f95ea9fb67..79dd7a939f0 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java +++ 
b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java @@ -25,6 +25,7 @@ import static java.util.concurrent.TimeUnit.MINUTES; import static java.util.concurrent.TimeUnit.SECONDS; +import com.datastax.driver.core.Cluster.Builder; import com.datastax.driver.core.policies.RoundRobinPolicy; import com.datastax.driver.core.policies.WhiteListPolicy; import com.google.common.base.Predicate; @@ -69,6 +70,9 @@ /** A number of static fields/methods handy for tests. */ public abstract class TestUtils { + private static final Map recentPorts = new HashMap(); + private static final long RECENT_PORT_TTL = 4 * 60 * (long) (1e9); // nanoseconds + private static final int MAX_FIND_PORT_RETRIES = 20; public static final String IP_PREFIX; static { @@ -759,10 +763,22 @@ public static String generateIdentifier(String prefix) { public static synchronized int findAvailablePort() throws RuntimeException { ServerSocket ss = null; try { - // let the system pick an ephemeral port - ss = new ServerSocket(0); - ss.setReuseAddress(true); - return ss.getLocalPort(); + int retries = 0; + while (retries++ < MAX_FIND_PORT_RETRIES) { + // let the system pick an ephemeral port + ss = new ServerSocket(0); + ss.setReuseAddress(true); + long time = System.nanoTime(); + int port = ss.getLocalPort(); + Long last = recentPorts.get(port); + if (last == null || time - last > RECENT_PORT_TTL) { + recentPorts.put(port, time); + logger.info("Found available port: {}", port); + return port; + } else { + ss.close(); + } + } } catch (IOException e) { throw Throwables.propagate(e); } finally { @@ -774,6 +790,8 @@ public static synchronized int findAvailablePort() throws RuntimeException { } } } + throw new RuntimeException( + "Couldn't find available port. 
Max retries (" + MAX_FIND_PORT_RETRIES + ") exceeded."); } private static final Predicate PORT_IS_UP = @@ -822,13 +840,41 @@ public static Cluster buildControlCluster(Cluster cluster, CCMAccess ccm) { Host controlHost = cluster.manager.controlConnection.connectedHost(); List singleAddress = Collections.singletonList(controlHost.getEndPoint().resolve()); - return Cluster.builder() - .addContactPoints(controlHost.getEndPoint().resolve().getAddress()) - .withPort(ccm.getBinaryPort()) + return configureClusterBuilder(Cluster.builder(), ccm) .withLoadBalancingPolicy(new WhiteListPolicy(new RoundRobinPolicy(), singleAddress)) .build(); } + /** + * Configures the builder with one contact point and port matching the given CCM cluster. + * Therefore it's not required to call {@link Cluster.Builder#addContactPoints}, it will be done + * automatically. + * + * @return The cluster builder (for method chaining). + */ + public static Builder configureClusterBuilder(Builder builder, CCMAccess ccm) { + // add only one contact point to force node1 to become the control host; some tests rely on + // that. + return configureClusterBuilder(builder, ccm, ccm.getContactPoints().get(0)); + } + + /** + * Configures the builder with binary port matching the given CCM cluster and with the given + * contact points. Therefore it's not required to call {@link Cluster.Builder#addContactPoints}, + * it will be done automatically. + * + * @return The cluster builder (for method chaining). + */ + public static Builder configureClusterBuilder( + Builder builder, CCMAccess ccm, InetAddress... contactPoints) { + builder + // use a different codec registry for each cluster instance + .withCodecRegistry(new CodecRegistry()) + .addContactPoints(contactPoints) + .withPort(ccm.getBinaryPort()); + return builder; + } + /** @return a {@link QueryOptions} that disables debouncing by setting intervals to 0ms. 
*/ public static QueryOptions nonDebouncingQueryOptions() { return new QueryOptions() @@ -875,6 +921,8 @@ public static void executeNoFail(Callable task, boolean logException) { task.call(); } catch (Exception e) { if (logException) logger.error(e.getMessage(), e); + } catch (AssertionError e) { + if (logException) logger.error(e.getMessage(), e); } } @@ -988,7 +1036,7 @@ public static Level setLogLevel(String logger, Level newLevel) { /** * Throws a {@link SkipException} if the input {@link CCMAccess} does not support compact storage - * (C* 4.0+) + * (C* 4.0+ or DSE 6.0+). * * @param ccm cluster to check against */ @@ -997,5 +1045,9 @@ public static void compactStorageSupportCheck(CCMAccess ccm) { throw new SkipException( "Compact tables are not allowed in Cassandra starting with 4.0 version"); } + if (ccm.getDSEVersion() != null + && ccm.getDSEVersion().compareTo(VersionNumber.parse("6.0")) >= 0) { + throw new SkipException("Compact tables are not allowed in DSE starting with 6.0 version"); + } } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TimeoutStressTest.java b/driver-core/src/test/java/com/datastax/driver/core/TimeoutStressTest.java index ecf36c2e87b..8278adb8d44 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TimeoutStressTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TimeoutStressTest.java @@ -63,7 +63,7 @@ public Cluster.Builder createClusterBuilder() { channelMonitor = register(new SocketChannelMonitor()); PoolingOptions poolingOptions = new PoolingOptions().setConnectionsPerHost(HostDistance.LOCAL, 8, 8); - return Cluster.builder() + return super.createClusterBuilder() .withPoolingOptions(poolingOptions) .withNettyOptions(channelMonitor.nettyOptions()) .withReconnectionPolicy(new ConstantReconnectionPolicy(1000)); diff --git a/driver-core/src/test/java/com/datastax/driver/core/TransietReplicationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TransientReplicationTest.java 
similarity index 95% rename from driver-core/src/test/java/com/datastax/driver/core/TransietReplicationTest.java rename to driver-core/src/test/java/com/datastax/driver/core/TransientReplicationTest.java index 849d8f99a49..4a0521228a8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TransietReplicationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TransientReplicationTest.java @@ -28,11 +28,9 @@ import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; -@CassandraVersion( - value = "4.0.0-alpha1", - description = "Transient Replication is for Cassandra 4.0+") +@CassandraVersion(value = "4.0.0", description = "Transient Replication is for Cassandra 4.0+") @CCMConfig(config = "enable_transient_replication:true") -public class TransietReplicationTest extends CCMTestsSupport { +public class TransientReplicationTest extends CCMTestsSupport { private static final String TRANSIENT_REPLICATION_KEYSPACE = "transient_rep_ks"; diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecAssert.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecAssert.java index 2394a0b92b1..d59371e7192 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecAssert.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecAssert.java @@ -24,7 +24,7 @@ @SuppressWarnings("unused") public class TypeCodecAssert extends AbstractAssert, TypeCodec> { - private ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED; + private ProtocolVersion version = ProtocolVersion.DEFAULT; protected TypeCodecAssert(TypeCodec actual) { super(actual, TypeCodecAssert.class); diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecOverlappingJavaTypeIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecOverlappingJavaTypeIntegrationTest.java index 3b25b25adc7..874893fb351 100644 --- 
a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecOverlappingJavaTypeIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecOverlappingJavaTypeIntegrationTest.java @@ -46,7 +46,7 @@ public void onTestContextInitialized() { } public Cluster.Builder createClusterBuilder() { - return Cluster.builder() + return super.createClusterBuilder() .withCodecRegistry(new CodecRegistry().register(new IntToStringCodec())); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTupleIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTupleIntegrationTest.java index 3d7a68f7133..fecc7579403 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTupleIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecTupleIntegrationTest.java @@ -97,17 +97,13 @@ public void should_handle_partial_tuples_with_default_codecs() { @Test(groups = "short") public void should_handle_tuples_with_custom_codecs() { - CodecRegistry codecRegistry = new CodecRegistry(); - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withCodecRegistry(codecRegistry) - .build()); + Cluster cluster = register(createClusterBuilder().build()); Session session = cluster.connect(keyspace); setUpTupleTypes(cluster); - codecRegistry.register(new LocationCodec(TypeCodec.tuple(locationType))); + cluster + .getConfiguration() + .getCodecRegistry() + .register(new LocationCodec(TypeCodec.tuple(locationType))); session.execute(insertQuery, uuid, "John Doe", locationValue); ResultSet rows = session.execute(selectQuery, uuid); Row row = rows.one(); @@ -129,17 +125,13 @@ public void should_handle_tuples_with_custom_codecs() { @Test(groups = "short") public void should_handle_partial_tuples_with_custom_codecs() { - CodecRegistry codecRegistry = new CodecRegistry(); - Cluster cluster = - register( - Cluster.builder() - 
.addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withCodecRegistry(codecRegistry) - .build()); + Cluster cluster = register(createClusterBuilder().build()); Session session = cluster.connect(keyspace); setUpTupleTypes(cluster); - codecRegistry.register(new LocationCodec(TypeCodec.tuple(locationType))); + cluster + .getConfiguration() + .getCodecRegistry() + .register(new LocationCodec(TypeCodec.tuple(locationType))); session.execute(insertQuery, uuid, "John Doe", partialLocationValueInserted); ResultSet rows = session.execute(selectQuery, uuid); Row row = rows.one(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecUDTIntegrationTest.java b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecUDTIntegrationTest.java index c92580782d3..d3d14966edb 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TypeCodecUDTIntegrationTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TypeCodecUDTIntegrationTest.java @@ -74,19 +74,14 @@ public void should_handle_udts_with_default_codecs() { @Test(groups = "short") public void should_handle_udts_with_custom_codecs() { - CodecRegistry codecRegistry = new CodecRegistry(); - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .withCodecRegistry(codecRegistry) - .build()); + Cluster cluster = register(createClusterBuilder().build()); Session session = cluster.connect(keyspace); setUpUserTypes(cluster); TypeCodec addressTypeCodec = TypeCodec.userType(addressType); TypeCodec phoneTypeCodec = TypeCodec.userType(phoneType); - codecRegistry + cluster + .getConfiguration() + .getCodecRegistry() .register(new AddressCodec(addressTypeCodec, Address.class)) .register(new PhoneCodec(phoneTypeCodec, Phone.class)); session.execute(insertQuery, uuid, "John Doe", address); diff --git a/driver-core/src/test/java/com/datastax/driver/core/UnresolvedUserTypeTest.java 
b/driver-core/src/test/java/com/datastax/driver/core/UnresolvedUserTypeTest.java index 5ce5355197d..eb9faa03923 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/UnresolvedUserTypeTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/UnresolvedUserTypeTest.java @@ -104,19 +104,12 @@ Creates the following acyclic graph (edges directed upwards public void should_resolve_nested_user_types() throws ExecutionException, InterruptedException { // Each CREATE TYPE statement in getTableDefinitions() has triggered a partial schema refresh - // that - // should have used previous UDT definitions for dependencies. + // that should have used previous UDT definitions for dependencies. checkUserTypes(cluster().getMetadata()); // Create a different Cluster instance to force a full refresh where all UDTs are loaded at - // once. - // The parsing logic should sort them to make sure they are loaded in the right order. - Cluster newCluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .build()); + // once. The parsing logic should sort them to make sure they are loaded in the right order. 
+ Cluster newCluster = register(createClusterBuilder().build()); checkUserTypes(newCluster.getMetadata()); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/VersionNumberTest.java b/driver-core/src/test/java/com/datastax/driver/core/VersionNumberTest.java index 2494fada318..8be308e40ca 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/VersionNumberTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/VersionNumberTest.java @@ -106,6 +106,14 @@ public void should_treat_same_prerelease_equal() { assertThat(version1.hashCode()).isEqualTo(version2.hashCode()); } + @Test(groups = "unit") + public void should_parse_scylla_release_candidates() { + assertThat(VersionNumber.parse("4.3.rc5")) + .hasMajorMinorPatch(4, 3, 0) + .hasToString("4.3.0-rc5") + .hasPreReleaseLabels("rc5"); + } + private void assertOrder(String version1, String version2, int expected) { assertThat(VersionNumber.parse(version1).compareTo(VersionNumber.parse(version2))) .isEqualTo(expected); diff --git a/driver-core/src/test/java/com/datastax/driver/core/cloud/CloudTest.java b/driver-core/src/test/java/com/datastax/driver/core/cloud/CloudTest.java index e40c05f5153..496ccc0314b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/cloud/CloudTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/cloud/CloudTest.java @@ -32,6 +32,7 @@ import com.datastax.driver.core.Cluster; import com.datastax.driver.core.EndPoint; import com.datastax.driver.core.PlainTextAuthProvider; +import com.datastax.driver.core.ProtocolVersion; import com.datastax.driver.core.ResultSet; import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.AuthenticationException; @@ -79,6 +80,7 @@ public void should_connect_to_proxy_using_absolute_path() { Session session = Cluster.builder() .withCloudSecureConnectBundle(proxy.getSecureBundleFile()) + .withProtocolVersion(ProtocolVersion.V4) .build() .connect(); ResultSet set = session.execute("select 
* from system.local"); @@ -90,7 +92,11 @@ public void should_connect_to_proxy_using_non_normalized_path() { String path = String.format("%s/%s", proxy.getProxyRootPath(), "certs/bundles/../bundles/creds-v1.zip"); Session session = - Cluster.builder().withCloudSecureConnectBundle(new File(path)).build().connect(); + Cluster.builder() + .withCloudSecureConnectBundle(new File(path)) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); ResultSet set = session.execute("select * from system.local"); assertThat(set).isNotNull(); } @@ -110,7 +116,12 @@ public void should_connect_to_proxy_using_file_provided_by_the_http_URL() throws new URL(String.format("http://localhost:%d%s", wireMockServer.port(), CERTS_BUNDLE_SUFFIX)); // when - Session session = Cluster.builder().withCloudSecureConnectBundle(configFile).build().connect(); + Session session = + Cluster.builder() + .withCloudSecureConnectBundle(configFile) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); // then ResultSet set = session.execute("select * from system.local"); @@ -133,7 +144,11 @@ public void should_connect_to_proxy_using_file_provided_by_input_stream() throws // when Session session = - Cluster.builder().withCloudSecureConnectBundle(configFile.openStream()).build().connect(); + Cluster.builder() + .withCloudSecureConnectBundle(configFile.openStream()) + .withProtocolVersion(ProtocolVersion.V4) + .build() + .connect(); // then ResultSet set = session.execute("select * from system.local"); @@ -146,6 +161,7 @@ public void should_connect_to_proxy_using_auth_provider() { Cluster.builder() .withCloudSecureConnectBundle(proxy.getSecureBundleNoCredsPath()) .withAuthProvider(new PlainTextAuthProvider("cassandra", "cassandra")) + .withProtocolVersion(ProtocolVersion.V4) .build() .connect(); ResultSet set = session.execute("select * from system.local"); @@ -158,6 +174,7 @@ public void should_not_connect_to_proxy_bad_creds() { Session session = Cluster.builder() 
.withCloudSecureConnectBundle(proxy.getSecureBundleNoCredsPath()) + .withProtocolVersion(ProtocolVersion.V4) .build() .connect(); fail("Expected an AuthenticationException"); @@ -172,6 +189,7 @@ public void should_not_connect_to_proxy() { Session session = Cluster.builder() .withCloudSecureConnectBundle(proxy.getSecureBundleUnreachable()) + .withProtocolVersion(ProtocolVersion.V4) .build() .connect(); fail("Expected an IllegalStateException"); @@ -188,6 +206,7 @@ public void should_not_allow_contact_points_and_cloud() { .addContactPoint("127.0.0.1") .withCloudSecureConnectBundle(proxy.getSecureBundleNoCredsPath()) .withCredentials("cassandra", "cassandra") + .withProtocolVersion(ProtocolVersion.V4) .build() .connect(); fail("Expected an IllegalStateException"); @@ -206,6 +225,7 @@ public void should_not_allow_cloud_with_contact_points_string() { .withCloudSecureConnectBundle(proxy.getSecureBundleNoCredsPath()) .addContactPoint("127.0.0.1") .withCredentials("cassandra", "cassandra") + .withProtocolVersion(ProtocolVersion.V4) .build() .connect(); fail("Expected an IllegalStateException"); @@ -230,6 +250,7 @@ public InetSocketAddress resolve() { } }) .withCredentials("cassandra", "cassandra") + .withProtocolVersion(ProtocolVersion.V4) .build() .connect(); fail("Expected an IllegalStateException"); diff --git a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsScassandraTest.java b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsScassandraTest.java index 1e9b6cbc1ff..d7d6b3a58ca 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsScassandraTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsScassandraTest.java @@ -77,7 +77,7 @@ public void should_throw_proper_read_timeout_exception() { fail("expected a ReadTimeoutException"); } catch (ReadTimeoutException e) { assertThat(e.getMessage()) - .isEqualTo( + .contains( "Cassandra timeout during read query at 
consistency LOCAL_ONE (1 responses were required but only 0 replica responded)"); assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_ONE); assertThat(e.getReceivedAcknowledgements()).isEqualTo(0); diff --git a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsTest.java index 31f3c958227..3d913b81f92 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/exceptions/ExceptionsTest.java @@ -283,7 +283,7 @@ public void should_create_proper_unprepared_exception() { public void should_create_proper_read_timeout_exception() { ReadTimeoutException e = new ReadTimeoutException(endPoint1, LOCAL_QUORUM, 2, 3, true); assertThat(e.getMessage()) - .isEqualTo( + .contains( "Cassandra timeout during read query at consistency LOCAL_QUORUM (3 responses were required but only 2 replica responded)"); assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); @@ -292,7 +292,7 @@ public void should_create_proper_read_timeout_exception() { assertThat(e.getEndPoint()).isEqualTo(endPoint1); e = e.copy(endPoint2); assertThat(e.getMessage()) - .isEqualTo( + .contains( "Cassandra timeout during read query at consistency LOCAL_QUORUM (3 responses were required but only 2 replica responded)"); assertThat(e.getConsistencyLevel()).isEqualTo(LOCAL_QUORUM); assertThat(e.getReceivedAcknowledgements()).isEqualTo(2); diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicyTest.java index 475d5b2b153..26a5409490f 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicyTest.java +++ 
b/driver-core/src/test/java/com/datastax/driver/core/policies/CloseableLoadBalancingPolicyTest.java @@ -40,9 +40,7 @@ public void should_be_invoked_at_shutdown() { @Override public Cluster.Builder createClusterBuilder() { policy = new CloseMonitoringPolicy(Policies.defaultLoadBalancingPolicy()); - return Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withLoadBalancingPolicy(policy); + return super.createClusterBuilder().withLoadBalancingPolicy(policy); } static class CloseMonitoringPolicy extends DelegatingLoadBalancingPolicy { diff --git a/driver-core/src/test/java/com/datastax/driver/core/policies/TokenAwarePolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/policies/TokenAwarePolicyTest.java index d05875feadd..80a258d6905 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/policies/TokenAwarePolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/policies/TokenAwarePolicyTest.java @@ -87,7 +87,7 @@ public void initMocks() { when(cluster.getConfiguration()).thenReturn(configuration); when(configuration.getCodecRegistry()).thenReturn(codecRegistry); when(configuration.getProtocolOptions()).thenReturn(protocolOptions); - when(protocolOptions.getProtocolVersion()).thenReturn(ProtocolVersion.NEWEST_SUPPORTED); + when(protocolOptions.getProtocolVersion()).thenReturn(ProtocolVersion.DEFAULT); when(cluster.getMetadata()).thenReturn(metadata); when(metadata.getReplicas(Metadata.quote("keyspace"), null, routingKey)) .thenReturn(Sets.newLinkedHashSet(host1, host2)); @@ -178,8 +178,7 @@ public void should_order_replicas_based_on_configuration( // Encodes into murmur hash '4874351301193663061' which should be owned by node 6 with // replicas 7 and 8. 
ByteBuffer routingKey = - TypeCodec.varchar() - .serialize("This is some sample text", ProtocolVersion.NEWEST_SUPPORTED); + TypeCodec.varchar().serialize("This is some sample text", ProtocolVersion.DEFAULT); // then: The replicas resolved from the cluster metadata must match node 6 and its replicas. List replicas = @@ -244,9 +243,7 @@ public void should_choose_proper_host_based_on_routing_key() { // Encodes into murmur hash '4557949199137838892' which should be owned by node 3. ByteBuffer routingKey = TypeCodec.varchar() - .serialize( - "should_choose_proper_host_based_on_routing_key", - ProtocolVersion.NEWEST_SUPPORTED); + .serialize("should_choose_proper_host_based_on_routing_key", ProtocolVersion.DEFAULT); SimpleStatement statement = new SimpleStatement("select * from table where k=5") .setRoutingKey(routingKey) @@ -307,7 +304,7 @@ public void should_choose_host_in_local_dc_when_using_network_topology_strategy_ TypeCodec.varchar() .serialize( "should_choose_host_in_local_dc_when_using_network_topology_strategy_and_dc_aware", - ProtocolVersion.NEWEST_SUPPORTED); + ProtocolVersion.DEFAULT); SimpleStatement statement = new SimpleStatement("select * from table where k=5") .setRoutingKey(routingKey) @@ -358,7 +355,7 @@ public void should_use_other_nodes_when_replicas_having_token_are_down() { TypeCodec.varchar() .serialize( "should_use_other_nodes_when_replicas_having_token_are_down", - ProtocolVersion.NEWEST_SUPPORTED); + ProtocolVersion.DEFAULT); SimpleStatement statement = new SimpleStatement("select * from table where k=5") .setRoutingKey(routingKey) @@ -460,7 +457,7 @@ public void should_use_provided_routing_key_boundstatement() { // Derive a routing key for single routing key component, this should resolve to // '4891967783720036163' - ByteBuffer routingKey = TypeCodec.bigint().serialize(33L, ProtocolVersion.NEWEST_SUPPORTED); + ByteBuffer routingKey = TypeCodec.bigint().serialize(33L, ProtocolVersion.DEFAULT); bs.setRoutingKey(routingKey); QueryTracker 
queryTracker = new QueryTracker(); @@ -479,10 +476,9 @@ public void should_use_provided_routing_key_boundstatement() { // Derive a routing key for multiple routing key components, this should resolve to // '3735658072872431718' bs = preparedStatement.bind("a", "b"); - ByteBuffer routingKeyK0Part = - TypeCodec.bigint().serialize(42L, ProtocolVersion.NEWEST_SUPPORTED); + ByteBuffer routingKeyK0Part = TypeCodec.bigint().serialize(42L, ProtocolVersion.DEFAULT); ByteBuffer routingKeyK1Part = - TypeCodec.varchar().serialize("hello_world", ProtocolVersion.NEWEST_SUPPORTED); + TypeCodec.varchar().serialize("hello_world", ProtocolVersion.DEFAULT); bs.setRoutingKey(routingKeyK0Part, routingKeyK1Part); queryTracker.query(session, 10, bs); @@ -514,9 +510,7 @@ public void should_properly_generate_and_use_routing_key_for_composite_partition ccm.start(); Cluster cluster = - Cluster.builder() - .addContactPoints(ccm.addressOfNode(1).getAddress()) - .withPort(ccm.getBinaryPort()) + TestUtils.configureClusterBuilder(Cluster.builder(), ccm) .withNettyOptions(nonQuietClusterCloseOptions) .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy())) .build(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderExecutionTest.java b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderExecutionTest.java index 2cb3d07861a..67c95ff475e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderExecutionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/querybuilder/QueryBuilderExecutionTest.java @@ -884,7 +884,7 @@ public void should_support_group_by() throws Exception { .groupBy("a", "clustering1")); fail("Expecting IQE"); } catch (InvalidQueryException e) { - assertThat(e.getMessage()).isEqualTo("Undefined column name clustering1"); + assertThat(e.getMessage()).startsWith("Undefined column name clustering1"); } try { @@ -899,7 +899,7 @@ public void should_support_group_by() 
throws Exception { .groupBy("a", "b", "z")); fail("Expecting IQE"); } catch (InvalidQueryException e) { - assertThat(e.getMessage()).isEqualTo("Undefined column name z"); + assertThat(e.getMessage()).startsWith("Undefined column name z"); } // Test with composite partition key diff --git a/driver-core/src/test/resources/client.crt b/driver-core/src/test/resources/client.crt index 241e5f545d6..8d548f8d327 100644 --- a/driver-core/src/test/resources/client.crt +++ b/driver-core/src/test/resources/client.crt @@ -1,19 +1,25 @@ -----BEGIN CERTIFICATE----- -MIIDqTCCApGgAwIBAgIERLZiJzANBgkqhkiG9w0BAQsFADCBhDELMAkGA1UEBhMCVVMxEzARBgNV -BAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC1NhbnRhIENsYXJhMRYwFAYDVQQKEw1EYXRhU3RheCBJ -bmMuMRowGAYDVQQLExFEcml2ZXJzIGFuZCBUb29sczEWMBQGA1UEAxMNRHJpdmVyIENsaWVudDAe -Fw0xNTAzMTIwMTA4MjRaFw0xNTA2MTAwMTA4MjRaMIGEMQswCQYDVQQGEwJVUzETMBEGA1UECBMK -Q2FsaWZvcm5pYTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExFjAUBgNVBAoTDURhdGFTdGF4IEluYy4x -GjAYBgNVBAsTEURyaXZlcnMgYW5kIFRvb2xzMRYwFAYDVQQDEw1Ecml2ZXIgQ2xpZW50MIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAq0J0EoZQnOv2KRrvwA+1ZL9VZ3hDdQMwkDfitoGN -B6upvMUZpf8W+ReQmaY6yacYJthHzsZTd3G97Bw81/3VNHQB9PnXGmbupMLVXeFXysSCs1nPEdJl -TBbJXWHSh41AE4ejJaoCoTuigKGwI9lTbOOPDz/WMcio9nagsCJdsdG2+TxmR7RlyzEIANJ0wpnL -JEIeJmRS2loLVuCU4lZ9hDLN57cP9jEVD4Hk2kJD4Exx7G9HQFH+/63H6XtEDZsJcYldR7yBNsGr -pz9CupULCS1R40ePQEIlUXhM4ft/hsljQybLQvvfXNVTvk5WgY7LNaBJy6A/Tfg32SXEn3wUvwID -AQABoyEwHzAdBgNVHQ4EFgQUt+JDOeziZzHNYTFU/FL9PhDGqSQwDQYJKoZIhvcNAQELBQADggEB -ADOYpa1f9dPcVLq3RiMytajHo3YJ0AQqGRzVgngkeRFSdhyy/y+/8D0/V5s6QbNt/l6x3FxkoiTR -1Lptf96eylnS5AkGQTgogJP53cSNrqkDL0IyyvErSiATEXNpBKz6ivY+e5J1GLTfX9Ylu8limzIq -Y6YBnr8fMLD6XWraxtzzkJ9NIPhhaz696rxqr8ix6uy0mgxR/7/jUglreimZkLW40/qiABgX7Evw -UqpuJWmqNbQP9UXecx/UJ0hdxxxuxkZsoRoQwWYhkeT4aGCLJv/hjiNTfFAt23uHe0LVfW/HqykW -KoEj8F08mJVe5ZfpjF974i5qO9PU9XxvLfLjNvo= +MIIEPTCCAyWgAwIBAgIUXQymVOmOttUHZI7Gi5AsX+VNprwwDQYJKoZIhvcNAQEL +BQAwgawxCzAJBgNVBAYTAlBMMREwDwYDVQQIDAhNYXpvdmlhbjEPMA0GA1UEBwwG 
+V2Fyc2F3MRwwGgYDVQQKDBNTY3lsbGFEQiBTcC4geiBvLm8uMRYwFAYDVQQLDA1T +Y3lsbGEgTm9kZSAxMRYwFAYDVQQDDA1TY3lsbGEgTm9kZSAxMSswKQYJKoZIhvcN +AQkBFhxwaW90ci5ncmFib3dza2lAc2N5bGxhZGIuY29tMCAXDTIyMDYxNzEwNTkw +NVoYDzIxMjIwNTI0MTA1OTA1WjCBrDELMAkGA1UEBhMCUEwxETAPBgNVBAgMCE1h +em92aWFuMQ8wDQYDVQQHDAZXYXJzYXcxHDAaBgNVBAoME1NjeWxsYURCIFNwLiB6 +IG8uby4xFjAUBgNVBAsMDVNjeWxsYSBOb2RlIDExFjAUBgNVBAMMDVNjeWxsYSBO +b2RlIDExKzApBgkqhkiG9w0BCQEWHHBpb3RyLmdyYWJvd3NraUBzY3lsbGFkYi5j +b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKYYuwE878o7A/h2eW +tpmOCIX6iRLA0p+6Ndhsm6WVts/YqbxB9J0ICzZs4RWf2rbE55qz0FpLZol4qwWO +WEizRfGSBnJPiUMKHEUyU51rGXjjwGbxBvV06dQlcfMT1wxhJ+pTEuYPsawQLoGR +2ByUecELweQwZ+ynDIPqeoUZnUFxHi+r4Aszf6xlLFyb2ePU2op0n/RMqLpqtFPK +BhjhXkbCcwk246C5M6bEjC0s09H9oyO4WM37cFjMcRMX+w8R0/VwKAtS0ztekXsg +VYD94fEKAs8LafjoYTpPqhQ9XEKbKHB9WHedlaPw+5Vldvv1Wrbv1n1G7fc+X69D +mnLnAgMBAAGjUzBRMB0GA1UdDgQWBBQEC/E0DYGLVULfF/fr1wgVbxGYoTAfBgNV +HSMEGDAWgBQEC/E0DYGLVULfF/fr1wgVbxGYoTAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4IBAQC6voLj0O//6Mfc0FUb8S3v4QFd/1NY94PjTSL3jfVC +5LVuynuLrhYXm8c985310DrsPwxpCuEFm7dDHC+WIo7gu1TZdNNP7NtFVUzHeJFo +p+gIMUEEyou46glEx6pBJSj3DpvQu2BFLIotKfjAmc0qTLj4mL9/rWTjy0uKs5Ls +6A5+xzIvVQF5GLP7vn6dD72DDzNxcUynrJXx4q/iczmYvQtGFjJ7gG+nrmefXF0T +MDVftixHw5ZHT0pkH2FA8OzqzIDfU4hovkFiFb5fIea9kbbD1HpjAegZshHKql+y +U8/8o09NhR7SLhZwZxiuhJo2d/qD2SvJcCyPWqrtczZ8 -----END CERTIFICATE----- diff --git a/driver-core/src/test/resources/client.key b/driver-core/src/test/resources/client.key index 05bb6fad83d..ac1ca512ba0 100644 --- a/driver-core/src/test/resources/client.key +++ b/driver-core/src/test/resources/client.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCrQnQShlCc6/Yp -Gu/AD7Vkv1VneEN1AzCQN+K2gY0Hq6m8xRml/xb5F5CZpjrJpxgm2EfOxlN3cb3s -HDzX/dU0dAH0+dcaZu6kwtVd4VfKxIKzWc8R0mVMFsldYdKHjUATh6MlqgKhO6KA -obAj2VNs448PP9YxyKj2dqCwIl2x0bb5PGZHtGXLMQgA0nTCmcskQh4mZFLaWgtW 
-4JTiVn2EMs3ntw/2MRUPgeTaQkPgTHHsb0dAUf7/rcfpe0QNmwlxiV1HvIE2waun -P0K6lQsJLVHjR49AQiVReEzh+3+GyWNDJstC+99c1VO+TlaBjss1oEnLoD9N+DfZ -JcSffBS/AgMBAAECggEAMHATNEoY8skqTmX3+XJ3847KMQGq0qWcTq3/yW7K3KiI -0YNNxc1oSfuIQmzpo69G/XWembUuVlItTWKPMufwLW3CP++KD0WdqawRfQQHOKpr -7R4xmvDPBb5MJcVNLlmdDekHE9gJ9mBPjeItV3ZYSivygnWjt2DxqQPUXvzZUzlu -munh3H5x6ehXVHDYGzosPgTpfmLHdlNfvF4x9bcklMMbCOyoPttXB2uHWOvUIS+/ -2YEkPmJfZdpudI7RqN75yYi7N8+gpnCTp530zA2yONyZ8THqEG/0nWy+02/zm5sm -gs1saYNwXME2IPekZNM/pJh2DtnTcxZaUt84q2nhAQKBgQDi8mgvE8ekbs6DLfKK -YAtTuOcLRpuvJqxtiQecqaumzgZnmHtkm6yuDNjieqB6OITudP4NdhPpyvOFJw46 -zTHMpGqZboxHuxoxMOgmyeiO+cdSwGHobr1zUcT8jVmLH7A+LtL5hHi+733EbCRh -sF04Vq9L46Q52mhcZKbs56U8MQKBgQDBLwotnOJH7hZD5sKS0o8/Sfj3pgzXEDpL -RfnrBPGhLn+1zhPEYsEW3mKI/yHiOZHNXZMQ6oYmxThg03qKTjaY8OIm8sg/zrlZ -M+o3wVnAzayrhw5gZ8DzqioHhEUMOAwwRFXRpfxqj8regrLjE9KaYty8ZYAFtwuH -W2S3+MVT7wKBgGQx7XlLXErmeNpFgN1Cxf1ylt7Nj5Jmmp3Jb8jkx9ne/8jg8ylZ -6YT2OxLSXONY7Kdyk29SADyp05WnxoqDaUcWF9IhkmFg45FwLC5j2f61nCCWuyMp -MQ8mvLdbmHrpxJ/PgGmU6NIzXe1IaU+P07g53S6+FBVOreCMt33ET5khAoGAGgKz -ZCDTdsvfw5S2bf5buzHCi9WXtP1CXBA37iTkQ8d2+oucrbx+Mw4ORlPTxBnsP7Jx -sr1hAqdbR+4xeZ2+TCliycu2mqDC4/fReWBXLVaEATRWAzT1DdnDfu+YPGTvfzA0 -Pd4TdmWV8w+19k0c9hyJi/Q+oIZczwTHMt4T85ECgYAe4J0ht6b6kPEG3d9vxmMN -T23S+ucYLHnfT1nacTuBZnMphWHhSqf8UJloIGpusxDU84MdAp22Jpd9SfPi9KK9 -yZY9WDJGeb0Yk7ML1R5GcAAkM78lUw/rS2VfMjQFnnUl2jVMS8adcm8/vHcpkcn7 -MufMEZzDpeO/aI8nbClktw== +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDKYYuwE878o7A/ +h2eWtpmOCIX6iRLA0p+6Ndhsm6WVts/YqbxB9J0ICzZs4RWf2rbE55qz0FpLZol4 +qwWOWEizRfGSBnJPiUMKHEUyU51rGXjjwGbxBvV06dQlcfMT1wxhJ+pTEuYPsawQ +LoGR2ByUecELweQwZ+ynDIPqeoUZnUFxHi+r4Aszf6xlLFyb2ePU2op0n/RMqLpq +tFPKBhjhXkbCcwk246C5M6bEjC0s09H9oyO4WM37cFjMcRMX+w8R0/VwKAtS0zte +kXsgVYD94fEKAs8LafjoYTpPqhQ9XEKbKHB9WHedlaPw+5Vldvv1Wrbv1n1G7fc+ +X69DmnLnAgMBAAECggEBAMd8z4yIegdd8mUsu1c8NrBihm4mn22iMAAFA4hLBDVH +NimDZiY9UGyVYNiCJiakGHSPN1b3kYmxUOC484kUTNUppK0j3Zm423Qj2bF56jmf 
+qiIVjWMYUOslchBW+hwcZf9+sOOiRVRC9jAdJ2rzc3iHa1qDjMKKLiObmoyOqJRN +QzERogkpluGnZi7kPTJvdwSY5iW5YLIkYt026GreC1eSHuacOzMtrFaAqEXTDmQl +jhi11HeYjAaGMAzYqBgJ+AMgxbBAejzlYqd+LLkicDuSEollaTbmHqnvNXxdMwX3 +sYmTNC093za6xBgG2JhglRIy4XVMlYm/WaSux/RxEkECgYEA+5S6WG/0Kpgc0xxl +5nP7edON7vUkYp8rSbKEzlc//ywZPioI3jxQEf51AG75ynESV7GhmbgkVJDkVBCy +iUjY4STzmaHmcJ0F1hQhDyZQFwsp3iWQXrC03VCAu2+bmhV7sC7+7bmUB2ZCaKPr +zxKUzb5CvbyWdhv9Oyi3JHYawWMCgYEAze+VMI+6rKo61yq+O/yDOyn5aI6bV2z8 +hFZP5jK9hByxt38quTMpk7jSoJohup3gSLBciYkT5rMK3fLngqxY7g5BnplyWnWC +tKe8trPt+uaxzdyVvtVWIsoiYFTOqXGbbh4njdvike39HZ27yqNvhYudDBgm6Hxt +yew4nOyIIa0CgYEAvoWIhXbrIuKlWZ708Iw/EqiKoT9WubUulBY0iwr6NSVRTImK +w7pLLQVV1+kzFAODT5P+/Km+IajEsYEfo+XeEMrqDBvWpsvTYqdFzfJGztyubURn +S75oif3wL4I7BXplfCxq9N8IYn4nppUU/HFm7VS7uHK8Q1+463dPEra6Vl8CgYEA +xTjpvDQqJypwZ5h7z58pXwP34/aPNs8dlu/CXUfC0D9aw8158RvaearLrRtvqfWA +0px0QxOQh6R4h88VgMPl9zaYCXgTWCaqIrFxvdfb1ioRpoHWt4hAkdjbyz3tHcG7 +hV5Dq0wmVHODsVVaf9q99zuaD/chbA2eZ39fSIoOZZECgYBv+9HIrby+yvhjMacV +D3rwbU1uv3/NFhO0rgP8dDCFCuKxhPzYmiTBklD+81gCaz4pDuIYqwlvKLcVIeER +C6IzDCfWSWibVcHPd0rEcPJC9dLy3OnU5aDQd9j94yEqHwEK6u4ZVvOiO1ASMiMC +exrB+xuqM00fiR3hhoRV168wbA== -----END PRIVATE KEY----- diff --git a/driver-core/src/test/resources/client.keystore b/driver-core/src/test/resources/client.keystore index f1030fdb377..6e98f75cc2b 100644 Binary files a/driver-core/src/test/resources/client.keystore and b/driver-core/src/test/resources/client.keystore differ diff --git a/driver-core/src/test/resources/client.truststore b/driver-core/src/test/resources/client.truststore index ac2b87c4405..b698d7bfe40 100644 Binary files a/driver-core/src/test/resources/client.truststore and b/driver-core/src/test/resources/client.truststore differ diff --git a/driver-core/src/test/resources/server.crt b/driver-core/src/test/resources/server.crt new file mode 100644 index 00000000000..632567e1743 --- /dev/null +++ b/driver-core/src/test/resources/server.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- 
+MIIEPTCCAyWgAwIBAgIUPFITBaxiUFYSeETN6rIB0xUkBJ4wDQYJKoZIhvcNAQEL +BQAwgawxCzAJBgNVBAYTAlBMMREwDwYDVQQIDAhNYXpvdmlhbjEPMA0GA1UEBwwG +V2Fyc2F3MRwwGgYDVQQKDBNTY3lsbGFEQiBTcC4geiBvLm8uMRYwFAYDVQQLDA1T +Y3lsbGEgU2VydmVyMRYwFAYDVQQDDA1TY3lsbGEgU2VydmVyMSswKQYJKoZIhvcN +AQkBFhxwaW90ci5ncmFib3dza2lAc2N5bGxhZGIuY29tMCAXDTIyMDYxNzEwNTgz +M1oYDzIxMjIwNTI0MTA1ODMzWjCBrDELMAkGA1UEBhMCUEwxETAPBgNVBAgMCE1h +em92aWFuMQ8wDQYDVQQHDAZXYXJzYXcxHDAaBgNVBAoME1NjeWxsYURCIFNwLiB6 +IG8uby4xFjAUBgNVBAsMDVNjeWxsYSBTZXJ2ZXIxFjAUBgNVBAMMDVNjeWxsYSBT +ZXJ2ZXIxKzApBgkqhkiG9w0BCQEWHHBpb3RyLmdyYWJvd3NraUBzY3lsbGFkYi5j +b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCwwh66j+kV2IxZbfZU +o1iodP98lgy6TSSauPaiqcW3Pe82kOioQVRZspJMBYhPoAfiiibyVoyKVZD+NN3d +BJe4q/qxgnH1thP2N8X73CsD9TOG5MxV4qQa8M/B8TbuQNS5NXW74Lx4g97RStsH ++C5lHVpNiX32m5/LwK1JnoUx+7UAFBjn3VRfIlCCI7iGWeRdqcx4c5/kmG4N1KcM ++/65lKOe0ppd4wPywlOuR7Mq6Pk0Z/jjNplJR+clICEqcqDYSvTz8i3aVGsCFFAM +Sd66BVIF3/bXsUq0DY0QMn4ODoh4g5LAuzj+MRtoscziXIObXYHrcu1Y9loJIsKO +MnmNAgMBAAGjUzBRMB0GA1UdDgQWBBQbUSOSsbqiAICHGLRg7Zrb+XGT2DAfBgNV +HSMEGDAWgBQbUSOSsbqiAICHGLRg7Zrb+XGT2DAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4IBAQAfCtnXS3Mw8O86s8fF5jcPZDqPRhckh0iMQ6odh9f+ +WfWIo+NDIP7ELFvhl1s2GsIKAhAW39HRU51xW0VsCwK9rijNp5pwsjnCR7Gu+r5H +P90EKld9o1+e4CNTd1djHGZFxKmTc7GCBOxrDaBPjyhTcSeNrVNjiomJacqZhHmY +TXOOePN1sfU6zFH7gdjv+ydnaMxfTKuhJdQlyXH5Ny8ZOXH988kZcCtC0YdF6c/B +xlHM7muPeyiEioerM+xmRYsiZPURBbnN7Du/4o4NQ+qlYR2+zH6zrCu5cpmG2ugz +5UyzLR3AGVE9AeV+tLQepvIaCGO7aHQoNZWKd5bDJPkO +-----END CERTIFICATE----- diff --git a/driver-core/src/test/resources/server.key b/driver-core/src/test/resources/server.key new file mode 100644 index 00000000000..b234b426513 --- /dev/null +++ b/driver-core/src/test/resources/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCwwh66j+kV2IxZ +bfZUo1iodP98lgy6TSSauPaiqcW3Pe82kOioQVRZspJMBYhPoAfiiibyVoyKVZD+ +NN3dBJe4q/qxgnH1thP2N8X73CsD9TOG5MxV4qQa8M/B8TbuQNS5NXW74Lx4g97R 
+StsH+C5lHVpNiX32m5/LwK1JnoUx+7UAFBjn3VRfIlCCI7iGWeRdqcx4c5/kmG4N +1KcM+/65lKOe0ppd4wPywlOuR7Mq6Pk0Z/jjNplJR+clICEqcqDYSvTz8i3aVGsC +FFAMSd66BVIF3/bXsUq0DY0QMn4ODoh4g5LAuzj+MRtoscziXIObXYHrcu1Y9loJ +IsKOMnmNAgMBAAECggEAEPTeW8lYc+cNN7sLyXzOEYL/y2TdnbWu0ET0LBCbTgPc +AeEO4K07uUsfK7nQJMtWVk4ohFD3NIIW4bwSmJRLNcURzJuV8RtheNpK3Ol3Gu8X +3VXvoEGYupjlZVaNjXecsJPHUpmxmuITqwJCVtZhJQQW2J9a/SBayQIX3Zsebfft +pHDZZgV2WLWYpLplmxqqxvEiSx+enTHRQYoiKMfmPcBlbzybgFatKe3vIRDTaMPL +ORakqu1NE/mBOVLYUUNZn47xi36Kt8JGHsXPk31ReYh2Y5CYzU0miUhPrRfIVT36 +XjHtw6d/5WF17i/69/pgUYoTbfhuMxIH9FyOjtUXhQKBgQDnhc+1PnfQc0LBA290 +IpHvPWK72i7gQdkpwB/lebsBT7nPiAS575WjssKVdO0UkVL3yd38fGSlVWh0NSzb +au07rGZkNMD4HzoeoXrPE+nxDUj3YIs5u8Lv6u1NYShpYSCcYcAvKzvI7/7VGcbh +t3UbEzarEaFS0DzD4K6AyPRCpwKBgQDDchp0kLn9/WJ/j4T6li4V4Un8sjlwfFTj +/RuAl101DL7OZ0Nci15Lav6E5Es/yD3ZecX5kZ30Y6nQZO7ZRLntYUsClEVbbKM9 +t3S4asyb1fIloO8W25jZ+VfpyoGIZdJKOVy8+qIGwtoQktcp4F/NWvp09euoJ49m +lRJKWKjsqwKBgFk3AREQX08d+y0xS7Nu1tlfyDonG0UjWrFx7ViaDeu1cx57hYa4 +werwqXa6NdllHpmJWlc+//GN9dBB5WUv0PbbuiEHCw0Ry22eQBqny9AtlF5u/3dh +2P5mgsb8l3v0B4L7T/ldKY6GnKY1ZqP389JZFwnYo1esEp5QaC4MQcOlAoGBAKyP +Zuo6Ce1FtZyYk7lHtYLd+Zwho3M8Yxs0kv6zJbk7+o90PtZLusJtwjCE/CdhXbJQ +wMn7lnF7OShkpTmxY1NCDp9FWANDZNGN04Er0mFYwUYV1GLjGg6CBuvFPx61f7jG +8qlrxHihuzTc4Rn5H7zJMc7ofdtmqkbRIWgA5YynAoGALwtuVOjSKWHVPQJIOxpF +AXMDw/2rgBHv8Ty/LCDRZTCLml34dCbESlBDLzzvn2kq5B4tfJcgXlKYrswLwc25 +5OTEvueg7JdkeyIYpznTU0ve4HRd21YKpPe0mAPxmwKZoBxXISlshAYgTaRcna3S +f/Lx+nkNVqDCzb2Y3/89Cgk= +-----END PRIVATE KEY----- diff --git a/driver-core/src/test/resources/server.keystore b/driver-core/src/test/resources/server.keystore index a8959b18888..0023f5ac3ae 100644 Binary files a/driver-core/src/test/resources/server.keystore and b/driver-core/src/test/resources/server.keystore differ diff --git a/driver-core/src/test/resources/server.truststore b/driver-core/src/test/resources/server.truststore index 1a3cfeea19f..e0a05100e67 100644 Binary files a/driver-core/src/test/resources/server.truststore and 
b/driver-core/src/test/resources/server.truststore differ diff --git a/driver-core/src/test/resources/server.truststore.pem b/driver-core/src/test/resources/server.truststore.pem new file mode 100644 index 00000000000..8d548f8d327 --- /dev/null +++ b/driver-core/src/test/resources/server.truststore.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIUXQymVOmOttUHZI7Gi5AsX+VNprwwDQYJKoZIhvcNAQEL +BQAwgawxCzAJBgNVBAYTAlBMMREwDwYDVQQIDAhNYXpvdmlhbjEPMA0GA1UEBwwG +V2Fyc2F3MRwwGgYDVQQKDBNTY3lsbGFEQiBTcC4geiBvLm8uMRYwFAYDVQQLDA1T +Y3lsbGEgTm9kZSAxMRYwFAYDVQQDDA1TY3lsbGEgTm9kZSAxMSswKQYJKoZIhvcN +AQkBFhxwaW90ci5ncmFib3dza2lAc2N5bGxhZGIuY29tMCAXDTIyMDYxNzEwNTkw +NVoYDzIxMjIwNTI0MTA1OTA1WjCBrDELMAkGA1UEBhMCUEwxETAPBgNVBAgMCE1h +em92aWFuMQ8wDQYDVQQHDAZXYXJzYXcxHDAaBgNVBAoME1NjeWxsYURCIFNwLiB6 +IG8uby4xFjAUBgNVBAsMDVNjeWxsYSBOb2RlIDExFjAUBgNVBAMMDVNjeWxsYSBO +b2RlIDExKzApBgkqhkiG9w0BCQEWHHBpb3RyLmdyYWJvd3NraUBzY3lsbGFkYi5j +b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKYYuwE878o7A/h2eW +tpmOCIX6iRLA0p+6Ndhsm6WVts/YqbxB9J0ICzZs4RWf2rbE55qz0FpLZol4qwWO +WEizRfGSBnJPiUMKHEUyU51rGXjjwGbxBvV06dQlcfMT1wxhJ+pTEuYPsawQLoGR +2ByUecELweQwZ+ynDIPqeoUZnUFxHi+r4Aszf6xlLFyb2ePU2op0n/RMqLpqtFPK +BhjhXkbCcwk246C5M6bEjC0s09H9oyO4WM37cFjMcRMX+w8R0/VwKAtS0ztekXsg +VYD94fEKAs8LafjoYTpPqhQ9XEKbKHB9WHedlaPw+5Vldvv1Wrbv1n1G7fc+X69D +mnLnAgMBAAGjUzBRMB0GA1UdDgQWBBQEC/E0DYGLVULfF/fr1wgVbxGYoTAfBgNV +HSMEGDAWgBQEC/E0DYGLVULfF/fr1wgVbxGYoTAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4IBAQC6voLj0O//6Mfc0FUb8S3v4QFd/1NY94PjTSL3jfVC +5LVuynuLrhYXm8c985310DrsPwxpCuEFm7dDHC+WIo7gu1TZdNNP7NtFVUzHeJFo +p+gIMUEEyou46glEx6pBJSj3DpvQu2BFLIotKfjAmc0qTLj4mL9/rWTjy0uKs5Ls +6A5+xzIvVQF5GLP7vn6dD72DDzNxcUynrJXx4q/iczmYvQtGFjJ7gG+nrmefXF0T +MDVftixHw5ZHT0pkH2FA8OzqzIDfU4hovkFiFb5fIea9kbbD1HpjAegZshHKql+y +U8/8o09NhR7SLhZwZxiuhJo2d/qD2SvJcCyPWqrtczZ8 +-----END CERTIFICATE----- diff --git a/driver-dist/pom.xml b/driver-dist/pom.xml index 053a1eb744d..ed44113afd1 100644 --- a/driver-dist/pom.xml +++ 
b/driver-dist/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-dist diff --git a/driver-examples/pom.xml b/driver-examples/pom.xml index f14f509497a..5f28572501b 100644 --- a/driver-examples/pom.xml +++ b/driver-examples/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-examples diff --git a/driver-examples/src/main/java/com/datastax/driver/examples/apollo/ApolloReadCassandraVersion.java b/driver-examples/src/main/java/com/datastax/driver/examples/astra/AstraReadCassandraVersion.java similarity index 79% rename from driver-examples/src/main/java/com/datastax/driver/examples/apollo/ApolloReadCassandraVersion.java rename to driver-examples/src/main/java/com/datastax/driver/examples/astra/AstraReadCassandraVersion.java index 18ee0ae907d..59591aa2cf8 100644 --- a/driver-examples/src/main/java/com/datastax/driver/examples/apollo/ApolloReadCassandraVersion.java +++ b/driver-examples/src/main/java/com/datastax/driver/examples/astra/AstraReadCassandraVersion.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.datastax.driver.examples.apollo; +package com.datastax.driver.examples.astra; import com.datastax.driver.core.Cluster; import com.datastax.driver.core.ResultSet; @@ -23,27 +23,28 @@ import java.io.File; /** - * Connects to a DataStax Apollo cluster and extracts basic information from it. + * Connects to a DataStax Astra cluster and extracts basic information from it. * *

Preconditions: * *

    - *
  • A DataStax Apollo cluster is running and accessible. - *
  • A DataStax Apollo secure connect bundle for the running cluster. + *
  • A DataStax Astra cluster is running and accessible. + *
  • A DataStax Astra secure connect bundle for the running cluster. *
* *

Side effects: none. * + * @see Creating an + * Astra Database * @see - * Creating an Apollo Database - * @see - * Providing access to Apollo databases - * @see - * Obtaining Apollo secure connect bundle + * href="https://docs.astra.datastax.com/docs/obtaining-database-credentials#sharing-your-secure-connect-bundle"> + * Providing access to Astra databases + * @see + * Obtaining Astra secure connect bundle * @see Java driver online manual */ -public class ApolloReadCassandraVersion { +public class AstraReadCassandraVersion { public static void main(String[] args) { @@ -58,7 +59,7 @@ public static void main(String[] args) { Cluster.builder() // Change the path here to the secure connect bundle location (see javadocs above) .withCloudSecureConnectBundle(new File("/path/to/secure-connect-database_name.zip")) - // Change the user_name and password here for the Apollo instance + // Change the user_name and password here for the Astra instance .withCredentials("user_name", "password") // Uncomment the next line to use a specific keyspace // .withKeyspace("keyspace_name") diff --git a/driver-extras/pom.xml b/driver-extras/pom.xml index ee94a1ab76e..b7c812d36f9 100644 --- a/driver-extras/pom.xml +++ b/driver-extras/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-extras diff --git a/driver-extras/src/test/java/com/datastax/driver/extras/codecs/arrays/ArrayCodecsTest.java b/driver-extras/src/test/java/com/datastax/driver/extras/codecs/arrays/ArrayCodecsTest.java index d541fc1ed7c..762791feddf 100644 --- a/driver-extras/src/test/java/com/datastax/driver/extras/codecs/arrays/ArrayCodecsTest.java +++ b/driver-extras/src/test/java/com/datastax/driver/extras/codecs/arrays/ArrayCodecsTest.java @@ -63,7 +63,7 @@ public void onTestContextInitialized() { @Override public Cluster.Builder createClusterBuilder() { - return Cluster.builder() + return super.createClusterBuilder() .withCodecRegistry( new CodecRegistry() 
.register(IntArrayCodec.instance) diff --git a/driver-mapping/pom.xml b/driver-mapping/pom.xml index 3d09d5e21da..52e6bb839e0 100644 --- a/driver-mapping/pom.xml +++ b/driver-mapping/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-mapping diff --git a/driver-mapping/src/test/java/com/datastax/driver/mapping/MapperUDTTest.java b/driver-mapping/src/test/java/com/datastax/driver/mapping/MapperUDTTest.java index ac075a1f4e6..12fb9b4f4b5 100644 --- a/driver-mapping/src/test/java/com/datastax/driver/mapping/MapperUDTTest.java +++ b/driver-mapping/src/test/java/com/datastax/driver/mapping/MapperUDTTest.java @@ -338,12 +338,7 @@ public void testAccessor() throws Exception { public void should_be_able_to_use_udtCodec_standalone() { // Create a separate Cluster/Session to start with a CodecRegistry from scratch (so not already // registered). - Cluster cluster = - register( - Cluster.builder() - .addContactPoints(getContactPoints()) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster = register(createClusterBuilder().build()); CodecRegistry registry = cluster.getConfiguration().getCodecRegistry(); Session session = cluster.connect(keyspace); @@ -557,7 +552,7 @@ public void should_throw_error_when_table_is_altered_and_is_not_compatible_anymo } catch (InvalidQueryException e) { // Error message varies by C* version. assertThat(e.getMessage()) - .isIn("Unknown identifier mainaddress", "Undefined column name mainaddress"); + .matches("(Unknown identifier mainaddress|Undefined column name mainaddress.*)"); } try { mapper.get(user.getUserId()); @@ -565,17 +560,15 @@ public void should_throw_error_when_table_is_altered_and_is_not_compatible_anymo } catch (InvalidQueryException e) { // Error message varies by C* version. 
assertThat(e.getMessage()) - .isIn( - "Undefined name mainaddress in selection clause", - "Undefined column name mainaddress"); + .matches( + "(Undefined name mainaddress in selection clause|Undefined column name mainaddress.*)"); } // trying to use a new mapper try { manager.mapper(User.class); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage()) - .isIn(String.format("Column mainaddress does not exist in table \"%s\".users", keyspace)); + assertThat(e.getMessage()).startsWith("Column mainaddress does not exist"); } } diff --git a/driver-mapping/src/test/java/com/datastax/driver/mapping/UDTFieldMapperTest.java b/driver-mapping/src/test/java/com/datastax/driver/mapping/UDTFieldMapperTest.java index 9256f7f74d4..f66b4a5708a 100644 --- a/driver-mapping/src/test/java/com/datastax/driver/mapping/UDTFieldMapperTest.java +++ b/driver-mapping/src/test/java/com/datastax/driver/mapping/UDTFieldMapperTest.java @@ -38,12 +38,7 @@ public class UDTFieldMapperTest extends CCMTestsSupport { @Test(groups = "short") public void udt_and_tables_with_ks_created_in_another_session_should_be_mapped() { - Cluster cluster1 = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster1 = register(createClusterBuilder().build()); Session session1 = cluster1.connect(); // Create type and table session1.execute( @@ -57,12 +52,7 @@ public void udt_and_tables_with_ks_created_in_another_session_should_be_mapped() cluster1.close(); // Create entities with another connection - Cluster cluster2 = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster2 = register(createClusterBuilder().build()); Session session2 = cluster2.newSession(); Mapper hashMapper = new MappingManager(session2).mapper(MyHashWithKeyspace.class); @@ -81,12 +71,7 @@ public void 
udt_and_tables_with_ks_created_in_another_session_should_be_mapped() @Test(groups = "short") public void udt_and_tables_without_ks_created_in_another_session_should_be_mapped() { - Cluster cluster1 = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster1 = register(createClusterBuilder().build()); Session session1 = cluster1.connect(); session1.execute( "create keyspace if not exists java_509b " @@ -100,12 +85,7 @@ public void udt_and_tables_without_ks_created_in_another_session_should_be_mappe cluster1.close(); // Create entities with another connection - Cluster cluster2 = - register( - Cluster.builder() - .addContactPoints(getContactPoints().get(0)) - .withPort(ccm().getBinaryPort()) - .build()); + Cluster cluster2 = register(createClusterBuilder().build()); Session session2 = cluster2.newSession(); session2.execute("use java_509b"); diff --git a/driver-tests/osgi/pom.xml b/driver-tests/osgi/pom.xml index b7fcdf3c2e2..b25dc57a487 100644 --- a/driver-tests/osgi/pom.xml +++ b/driver-tests/osgi/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-tests-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-tests-osgi diff --git a/driver-tests/pom.xml b/driver-tests/pom.xml index 30d897ba7b5..a1a86ffa249 100644 --- a/driver-tests/pom.xml +++ b/driver-tests/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-tests-parent diff --git a/driver-tests/shading/pom.xml b/driver-tests/shading/pom.xml index 7b225c6f13e..e83da598a93 100644 --- a/driver-tests/shading/pom.xml +++ b/driver-tests/shading/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-tests-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT pom diff --git a/driver-tests/shading/shaded/pom.xml b/driver-tests/shading/shaded/pom.xml index 489ef95963e..a8e5e7f259c 100644 --- a/driver-tests/shading/shaded/pom.xml +++ b/driver-tests/shading/shaded/pom.xml @@ 
-26,7 +26,7 @@ com.scylladb scylla-driver-tests-shading - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-tests-shading-shaded diff --git a/driver-tests/shading/unshaded/pom.xml b/driver-tests/shading/unshaded/pom.xml index 3d176fffcd3..8b05ccd8d27 100644 --- a/driver-tests/shading/unshaded/pom.xml +++ b/driver-tests/shading/unshaded/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-tests-shading - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-tests-shading-unshaded diff --git a/driver-tests/stress/pom.xml b/driver-tests/stress/pom.xml index 99ed63c61e4..05358ae8f58 100644 --- a/driver-tests/stress/pom.xml +++ b/driver-tests/stress/pom.xml @@ -26,7 +26,7 @@ com.scylladb scylla-driver-tests-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT scylla-driver-tests-stress diff --git a/faq/README.md b/faq/README.md index 8ef0dd017b6..f2481fd5807 100644 --- a/faq/README.md +++ b/faq/README.md @@ -35,7 +35,7 @@ row.getBool(0); // this is equivalent row.getBool("applied") Note that, unlike manual inspection, `wasApplied` does not consume the first row. -[wasApplied]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ResultSet.html#wasApplied-- +[wasApplied]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html#wasApplied-- ### What is a parameterized statement and how can I use it? @@ -289,7 +289,7 @@ version is available, you may want to reach out to the maintainer of that tool t an update with compatibility to this driver version. 
-[Blobs.java]: https://github.com/datastax/java-driver/tree/3.10.0/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java +[Blobs.java]: https://github.com/datastax/java-driver/tree/3.11.2/driver-examples/src/main/java/com/datastax/driver/examples/datatypes/Blobs.java [CASSANDRA-7304]: https://issues.apache.org/jira/browse/CASSANDRA-7304 [Parameters and Binding]: ../manual/statements/prepared/#parameters-and-binding [Mapper options]: ../manual/object_mapper/using/#mapper-options diff --git a/faq/osgi/README.md b/faq/osgi/README.md index 06f780333fe..37122b8e957 100644 --- a/faq/osgi/README.md +++ b/faq/osgi/README.md @@ -160,7 +160,7 @@ it is also normal to see the following log lines when starting the driver: [BND]:http://bnd.bndtools.org/ [Maven bundle plugin]:https://cwiki.apache.org/confluence/display/FELIX/Apache+Felix+Maven+Bundle+Plugin+%28BND%29 [OSGi examples repository]:https://github.com/datastax/java-driver-examples-osgi -[without metrics]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Builder.html#withoutMetrics-- +[without metrics]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withoutMetrics-- [SLF4J]:http://www.slf4j.org/ [Logback]:http://logback.qos.ch/ [Tycho]:https://eclipse.org/tycho/ diff --git a/manual/README.md b/manual/README.md index ccb619d2adc..3c3075899db 100644 --- a/manual/README.md +++ b/manual/README.md @@ -209,7 +209,7 @@ String firstName = row.getString("first_name"); blob getBytes java.nio.ByteBuffer boolean getBool boolean counter getLong long - date getDate LocalDate + date getDate LocalDate decimal getDecimal java.math.BigDecimal double getDouble double float getFloat float @@ -291,20 +291,20 @@ menu on the left hand side to navigate sub-sections. If you're [browsing the sou github.com](https://github.com/datastax/java-driver/tree/3.x/manual), simply navigate to each sub-directory. 
-[Cluster]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.html -[Cluster.Builder]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Builder.html -[Initializer]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Initializer.html -[Session]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.html -[ResultSet]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ResultSet.html -[Row]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Row.html -[NettyOptions]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/NettyOptions.html -[QueryOptions]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/QueryOptions.html -[SocketOptions]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html -[Host.StateListener]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Host.StateListener.html -[LatencyTracker]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/LatencyTracker.html -[SchemaChangeListener]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SchemaChangeListener.html -[NoHostAvailableException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/NoHostAvailableException.html -[LocalDate]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/LocalDate.html +[Cluster]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html +[Cluster.Builder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html +[Initializer]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Initializer.html +[Session]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html +[ResultSet]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html +[Row]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Row.html +[NettyOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/NettyOptions.html +[QueryOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html +[SocketOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html +[Host.StateListener]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Host.StateListener.html +[LatencyTracker]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/LatencyTracker.html +[SchemaChangeListener]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SchemaChangeListener.html +[NoHostAvailableException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html +[LocalDate]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/LocalDate.html ```eval_rst .. toctree:: diff --git a/manual/address_resolution/README.md b/manual/address_resolution/README.md index 8f0ffd33740..0662ab92295 100644 --- a/manual/address_resolution/README.md +++ b/manual/address_resolution/README.md @@ -102,8 +102,8 @@ private/public switch automatically based on location). 
-[AddressTranslator]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/AddressTranslator.html -[EC2MultiRegionAddressTranslator]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslator.html +[AddressTranslator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/AddressTranslator.html +[EC2MultiRegionAddressTranslator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/EC2MultiRegionAddressTranslator.html [cassandra.yaml]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html [rpc_address]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html?scroll=configCassandra_yaml__rpc_address diff --git a/manual/async/README.md b/manual/async/README.md index 21a70f7b767..43523cd548c 100644 --- a/manual/async/README.md +++ b/manual/async/README.md @@ -51,8 +51,8 @@ to the current page, and [fetchMoreResults] to get a future to the next page (see also the section on [paging](../paging/)). Here is a full example: -[getAvailableWithoutFetching]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ResultSet.html#getAvailableWithoutFetching-- -[fetchMoreResults]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ResultSet.html#fetchMoreResults-- +[getAvailableWithoutFetching]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html#getAvailableWithoutFetching-- +[fetchMoreResults]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html#fetchMoreResults-- ```java Statement statement = new SimpleStatement("select * from foo").setFetchSize(20); @@ -134,5 +134,5 @@ There are still a few places where the driver will block internally hasn't been fetched already. 
[ListenableFuture]: https://github.com/google/guava/wiki/ListenableFutureExplained -[init]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.html#init-- -[query trace]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/QueryTrace.html +[init]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html#init-- +[query trace]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryTrace.html diff --git a/manual/auth/README.md b/manual/auth/README.md index c8fc021b6f9..22838a4836d 100644 --- a/manual/auth/README.md +++ b/manual/auth/README.md @@ -28,7 +28,7 @@ You can also write your own provider; it must implement [AuthProvider]. [SASL]: https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer -[Cluster.Builder.withCredentials]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Builder.html#withCredentials-java.lang.String-java.lang.String- -[AuthProvider]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/AuthProvider.html -[Cluster.Builder.withAuthProvider]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Builder.html#withAuthProvider-com.datastax.driver.core.AuthProvider- -[PlainTextAuthProvider]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PlainTextAuthProvider.html +[Cluster.Builder.withCredentials]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withCredentials-java.lang.String-java.lang.String- +[AuthProvider]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/AuthProvider.html +[Cluster.Builder.withAuthProvider]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withAuthProvider-com.datastax.driver.core.AuthProvider- +[PlainTextAuthProvider]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PlainTextAuthProvider.html diff --git a/manual/compression/README.md b/manual/compression/README.md index 99817a9285b..3d3ffabc04e 100644 --- a/manual/compression/README.md +++ b/manual/compression/README.md @@ -88,4 +88,4 @@ cluster = Cluster.builder() .build(); ``` -[pom]: https://repo1.maven.org/maven2/com/scylladb/scylla-driver-parent/3.10.2.0/scylla-driver-parent-3.10.2.0.pom \ No newline at end of file +[pom]: https://repo1.maven.org/maven2/com/scylladb/scylla-driver-parent/3.11.2.0/scylla-driver-parent-3.11.2.0.pom diff --git a/manual/control_connection/README.md b/manual/control_connection/README.md index eb610627941..189ba31b82b 100644 --- a/manual/control_connection/README.md +++ b/manual/control_connection/README.md @@ -18,4 +18,4 @@ used exclusively for administrative requests. It is included in [Session.State.g as well as the `open-connections` [metric](../metrics); for example, if you've configured a pool size of 2, the control node will have 3 connections. -[Session.State.getOpenConnections]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.State.html#getOpenConnections-com.datastax.driver.core.Host- \ No newline at end of file +[Session.State.getOpenConnections]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.State.html#getOpenConnections-com.datastax.driver.core.Host- \ No newline at end of file diff --git a/manual/custom_codecs/README.md b/manual/custom_codecs/README.md index 108d7dab1d4..3d586a028f8 100644 --- a/manual/custom_codecs/README.md +++ b/manual/custom_codecs/README.md @@ -447,26 +447,26 @@ Beware that in these cases, the lookup performs in average 10x worse. If perform consider using prepared statements all the time. 
[JAVA-721]: https://datastax-oss.atlassian.net/browse/JAVA-721 -[TypeCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TypeCodec.html -[LocalDate]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/LocalDate.html +[TypeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html +[LocalDate]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/LocalDate.html [ByteBuffer]: http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html -[serialize]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TypeCodec.html#serialize-T-com.datastax.driver.core.ProtocolVersion- -[deserialize]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TypeCodec.html#deserialize-java.nio.ByteBuffer-com.datastax.driver.core.ProtocolVersion- -[TypeCodec.format]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TypeCodec.html#format-T- -[TypeCodec.parse]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TypeCodec.html#parse-java.lang.String- -[accepts]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TypeCodec.html#accepts-com.datastax.driver.core.DataType- -[CodecRegistry]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/CodecRegistry.html -[CodecNotFoundException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/CodecNotFoundException.html +[serialize]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#serialize-T-com.datastax.driver.core.ProtocolVersion- +[deserialize]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#deserialize-java.nio.ByteBuffer-com.datastax.driver.core.ProtocolVersion- +[TypeCodec.format]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#format-T- +[TypeCodec.parse]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#parse-java.lang.String- +[accepts]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TypeCodec.html#accepts-com.datastax.driver.core.DataType- +[CodecRegistry]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/CodecRegistry.html +[CodecNotFoundException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/CodecNotFoundException.html [Jackson]: https://github.com/FasterXML/jackson [AbstractType]: https://github.com/apache/cassandra/blob/trunk/src/java/org/apache/cassandra/db/marshal/AbstractType.java -[UserType]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/UserType.html -[UDTValue]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/UDTValue.html -[TupleType]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TupleType.html -[TupleValue]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TupleValue.html -[CustomType]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/DataType.CustomType.html +[UserType]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/UserType.html +[UDTValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/UDTValue.html +[TupleType]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleType.html +[TupleValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleValue.html +[CustomType]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/DataType.CustomType.html [TypeToken]: https://google.github.io/guava/releases/19.0/api/docs/com/google/common/reflect/TypeToken.html -[SimpleStatement]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SimpleStatement.html -[BuiltStatement]: 
https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/querybuilder/BuiltStatement.html -[setList]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SettableByIndexData.html#setList-int-java.util.List- -[setSet]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SettableByIndexData.html#setSet-int-java.util.Set- -[setMap]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SettableByIndexData.html#setMap-int-java.util.Map- +[SimpleStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SimpleStatement.html +[BuiltStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/BuiltStatement.html +[setList]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SettableByIndexData.html#setList-int-java.util.List- +[setSet]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SettableByIndexData.html#setSet-int-java.util.Set- +[setMap]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SettableByIndexData.html#setMap-int-java.util.Map- diff --git a/manual/custom_codecs/extras/README.md b/manual/custom_codecs/extras/README.md index 276cdf8b80f..523cc89c355 100644 --- a/manual/custom_codecs/extras/README.md +++ b/manual/custom_codecs/extras/README.md @@ -13,7 +13,7 @@ The module is published as a separate Maven artifact: com.scylladb scylla-driver-extras - 3.10.2.0 + 3.11.2.0 ``` @@ -76,12 +76,12 @@ session.execute("INSERT INTO example (id, t) VALUES (1, ?)", ZoneId.of("GMT+07:00")); ``` -[InstantCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/jdk8/InstantCodec.html -[LocalDateCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/jdk8/LocalDateCodec.html -[LocalDateTimeCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/jdk8/LocalDateTimeCodec.html 
-[LocalTimeCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/jdk8/LocalTimeCodec.html -[ZonedDateTimeCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/jdk8/ZonedDateTimeCodec.html -[ZoneIdCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/jdk8/ZoneIdCodec.html +[InstantCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/InstantCodec.html +[LocalDateCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/LocalDateCodec.html +[LocalDateTimeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/LocalDateTimeCodec.html +[LocalTimeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/LocalTimeCodec.html +[ZonedDateTimeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/ZonedDateTimeCodec.html +[ZoneIdCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/ZoneIdCodec.html [Instant]: https://docs.oracle.com/javase/8/docs/api/java/time/Instant.html [LocalDate]: https://docs.oracle.com/javase/8/docs/api/java/time/LocalDate.html [LocalDateTime]: https://docs.oracle.com/javase/8/docs/api/java/time/LocalDateTime.html @@ -133,10 +133,10 @@ session.execute("INSERT INTO example (id, t) VALUES (1, ?)", DateTime.parse("2010-06-30T01:20:47.999+01:00")); ``` -[InstantCodec_joda]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/joda/InstantCodec.html -[LocalDateCodec_joda]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/joda/LocalDateCodec.html -[LocalTimeCodec_joda]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/joda/LocalTimeCodec.html -[DateTimeCodec]: 
https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/joda/DateTimeCodec.html +[InstantCodec_joda]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/joda/InstantCodec.html +[LocalDateCodec_joda]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/joda/LocalDateCodec.html +[LocalTimeCodec_joda]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/joda/LocalTimeCodec.html +[DateTimeCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/joda/DateTimeCodec.html [DateTime]: http://www.joda.org/joda-time/apidocs/org/joda/time/DateTime.html [Instant_joda]: http://www.joda.org/joda-time/apidocs/org/joda/time/Instant.html [LocalDate_joda]: http://www.joda.org/joda-time/apidocs/org/joda/time/LocalDate.html @@ -154,8 +154,8 @@ Time can also be expressed as simple durations: There is no extra codec for `time`, because by default the driver already maps that type to a `long` representing the number of nanoseconds since midnight. 
-[SimpleTimestampCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/date/SimpleTimestampCodec.html -[SimpleDateCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/date/SimpleDateCodec.html +[SimpleTimestampCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/date/SimpleTimestampCodec.html +[SimpleDateCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/date/SimpleDateCodec.html ### Enums @@ -193,8 +193,8 @@ Note that if you registered an `EnumNameCodec` and an `EnumOrdinalCodec` _for th In practice, this is unlikely to happen, because you'll probably stick to a single CQL type for a given enum type; however, if you ever run into that issue, the workaround is to use [prepared statements](../../statements/prepared/), for which the driver knows the CQL type and can pick the exact codec. -[EnumNameCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/enums/EnumNameCodec.html -[EnumOrdinalCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/enums/EnumOrdinalCodec.html +[EnumNameCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/enums/EnumNameCodec.html +[EnumOrdinalCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/enums/EnumOrdinalCodec.html [name]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html#name-- [ordinal]: https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html#ordinal-- @@ -239,7 +239,7 @@ session.execute("insert into example (id, owner) values (1, ?)", // owner saved as '{"id":1,"name":"root"}' ``` -[JacksonJsonCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/json/JacksonJsonCodec.html +[JacksonJsonCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/json/JacksonJsonCodec.html 
[Jackson]: https://github.com/FasterXML/jackson @@ -278,7 +278,7 @@ session.execute("insert into example (id, owner) values (1, ?)", ``` -[Jsr353JsonCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/json/Jsr353JsonCodec.html +[Jsr353JsonCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/json/Jsr353JsonCodec.html [JsonStructure]: https://docs.oracle.com/javaee/7/tutorial/jsonp002.htm @@ -331,7 +331,7 @@ For the same reason, we need to give a type hint when setting "v", in the form o anonymous inner class; we recommend storing these tokens as constants in a utility class, to avoid creating them too often. -[OptionalCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/jdk8/OptionalCodec.html +[OptionalCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/jdk8/OptionalCodec.html [Optional]: https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html [TypeToken]: http://google.github.io/guava/releases/19.0/api/docs/com/google/common/reflect/TypeToken.html @@ -375,7 +375,7 @@ session.execute(pst.bind() See the JDK8 Optional section above for explanations about [TypeToken]. -[OptionalCodec_guava]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/guava/OptionalCodec.html +[OptionalCodec_guava]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/guava/OptionalCodec.html [Optional_guava]: http://google.github.io/guava/releases/19.0/api/docs/com/google/common/base/Optional.html @@ -400,9 +400,9 @@ session.execute("insert into example (i, l) values (1, ?)", Package [com.datastax.driver.extras.codecs.arrays][arrays] contains similar codecs for all primitive types, and [ObjectArrayCodec] to map arrays of objects. 
-[IntArrayCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/arrays/IntArrayCodec.html -[ObjectArrayCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/arrays/ObjectArrayCodec.html -[arrays]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/arrays/package-summary.html +[IntArrayCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/arrays/IntArrayCodec.html +[ObjectArrayCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/arrays/ObjectArrayCodec.html +[arrays]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/arrays/package-summary.html ### Abstract utilities @@ -432,5 +432,5 @@ These two classes are convenient, but since they perform conversions in two step optimal approach. If performance is paramount, it's better to start from scratch and convert your objects to `ByteBuffer` directly. 
-[MappingCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/MappingCodec.html -[ParsingCodec]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/extras/codecs/ParsingCodec.html +[MappingCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/MappingCodec.html +[ParsingCodec]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/extras/codecs/ParsingCodec.html diff --git a/manual/custom_payloads/README.md b/manual/custom_payloads/README.md index a31cee5161b..7a4e726b9cd 100644 --- a/manual/custom_payloads/README.md +++ b/manual/custom_payloads/README.md @@ -241,8 +241,8 @@ The log message contains a pretty-printed version of the payload itself, and its [CASSANDRA-8553]: https://issues.apache.org/jira/browse/CASSANDRA-8553 [v4spec]: https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec [qh]: https://issues.apache.org/jira/browse/CASSANDRA-6659 -[nhae]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/NoHostAvailableException.html +[nhae]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html [chm]: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html [immutablemap]: http://docs.guava-libraries.googlecode.com/git/javadoc/com/google/common/collect/ImmutableMap.html -[ufe]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/UnsupportedFeatureException.html +[ufe]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/UnsupportedFeatureException.html diff --git a/manual/idempotence/README.md b/manual/idempotence/README.md index 71246748735..6894d5e889a 100644 --- a/manual/idempotence/README.md +++ b/manual/idempotence/README.md @@ -125,8 +125,8 @@ broke linearizability by doing a transparent retry at step 6. 
If linearizability is important for you, you should ensure that lightweight transactions are appropriately flagged as not idempotent. -[isIdempotent]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Statement.html#isIdempotent-- -[setDefaultIdempotence]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/QueryOptions.html#setDefaultIdempotence-boolean- -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/querybuilder/QueryBuilder.html +[isIdempotent]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html#isIdempotent-- +[setDefaultIdempotence]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html#setDefaultIdempotence-boolean- +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html [linearizability]: https://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability \ No newline at end of file diff --git a/manual/load_balancing/README.md b/manual/load_balancing/README.md index dad615adeb8..4568c8025b5 100644 --- a/manual/load_balancing/README.md +++ b/manual/load_balancing/README.md @@ -277,11 +277,11 @@ For any host, the distance returned by the policy is always the same as its chil Query plans are based on the child policy's, except that hosts that are currently excluded for being too slow are moved to the end of the plan. 
-[withExclusionThreshold]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withExclusionThreshold-double- -[withMininumMeasurements]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withMininumMeasurements-int- -[withRetryPeriod]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withRetryPeriod-long-java.util.concurrent.TimeUnit- -[withScale]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withScale-long-java.util.concurrent.TimeUnit- -[withUpdateRate]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withUpdateRate-long-java.util.concurrent.TimeUnit- +[withExclusionThreshold]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withExclusionThreshold-double- +[withMininumMeasurements]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withMininumMeasurements-int- +[withRetryPeriod]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withRetryPeriod-long-java.util.concurrent.TimeUnit- +[withScale]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withScale-long-java.util.concurrent.TimeUnit- +[withUpdateRate]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.Builder.html#withUpdateRate-long-java.util.concurrent.TimeUnit- ### Filtering policies @@ -299,15 +299,15 @@ studying the existing implementations first: `RoundRobinPolicy` is a good place complex ones like `DCAwareRoundRobinPolicy`. 
-[LoadBalancingPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/LoadBalancingPolicy.html -[RoundRobinPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RoundRobinPolicy.html -[DCAwareRoundRobinPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.html -[TokenAwarePolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/TokenAwarePolicy.html -[LatencyAwarePolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/LatencyAwarePolicy.html -[HostFilterPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/HostFilterPolicy.html -[WhiteListPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/WhiteListPolicy.html -[HostDistance]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/HostDistance.html -[refreshConnectedHosts]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PoolingOptions.html#refreshConnectedHosts-- -[setMetadataEnabled]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/QueryOptions.html#setMetadataEnabled-boolean- -[Statement#getKeyspace]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Statement.html#getKeyspace-- -[Statement#getRoutingKey]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Statement.html#getRoutingKey-- +[LoadBalancingPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LoadBalancingPolicy.html +[RoundRobinPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RoundRobinPolicy.html +[DCAwareRoundRobinPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.html +[TokenAwarePolicy]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/TokenAwarePolicy.html +[LatencyAwarePolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LatencyAwarePolicy.html +[HostFilterPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/HostFilterPolicy.html +[WhiteListPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/WhiteListPolicy.html +[HostDistance]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/HostDistance.html +[refreshConnectedHosts]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#refreshConnectedHosts-- +[setMetadataEnabled]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html#setMetadataEnabled-boolean- +[Statement#getKeyspace]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html#getKeyspace-- +[Statement#getRoutingKey]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html#getRoutingKey-- diff --git a/manual/logging/README.md b/manual/logging/README.md index d68f0c2d45c..a421be520ad 100644 --- a/manual/logging/README.md +++ b/manual/logging/README.md @@ -208,12 +208,12 @@ Aggregation query used without partition key ``` These -[query warnings](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ExecutionInfo.html#getWarnings--) +[query warnings](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html#getWarnings--) are available programmatically from the -[ExecutionInfo](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ExecutionInfo.html) +[ExecutionInfo](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html) via -[ResultSet](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ResultSet.html)'s 
-[getExecutionInfo()](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PagingIterable.html#getExecutionInfo--) +[ResultSet](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html)'s +[getExecutionInfo()](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PagingIterable.html#getExecutionInfo--) method. They are also logged by the driver: ``` @@ -340,4 +340,4 @@ It also turns on slow query tracing as described above. ``` -[query_logger]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/QueryLogger.html +[query_logger]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryLogger.html diff --git a/manual/metadata/README.md b/manual/metadata/README.md index fd65ed90e4e..93dc169d9b4 100644 --- a/manual/metadata/README.md +++ b/manual/metadata/README.md @@ -4,7 +4,7 @@ The driver maintains global information about the Cassandra cluster it is connected to. It is available via [Cluster#getMetadata()][getMetadata]. -[getMetadata]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.html#getMetadata-- +[getMetadata]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html#getMetadata-- ### Schema metadata @@ -12,8 +12,8 @@ Use [getKeyspace(String)][getKeyspace] or [getKeyspaces()][getKeyspaces] to get keyspace-level metadata. From there you can access the keyspace's objects (tables, and UDTs and UDFs if relevant). 
-[getKeyspace]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metadata.html#getKeyspace-java.lang.String- -[getKeyspaces]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metadata.html#getKeyspaces-- +[getKeyspace]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getKeyspace-java.lang.String- +[getKeyspaces]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getKeyspaces-- #### Refreshes @@ -47,8 +47,8 @@ Note that it is preferable to register a listener only *after* the cluster is fu otherwise the listener could be notified with a great deal of "Added" events as the driver builds the schema metadata from scratch for the first time. -[SchemaChangeListener]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SchemaChangeListener.html -[registerListener]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.html#register-com.datastax.driver.core.SchemaChangeListener- +[SchemaChangeListener]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SchemaChangeListener.html +[registerListener]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html#register-com.datastax.driver.core.SchemaChangeListener- #### Schema agreement @@ -135,9 +135,9 @@ custom executor). 
Check out the API docs for the features in this section: -* [withMaxSchemaAgreementWaitSeconds(int)](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Builder.html#withMaxSchemaAgreementWaitSeconds-int-) -* [isSchemaInAgreement()](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ExecutionInfo.html#isSchemaInAgreement--) -* [checkSchemaAgreement()](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metadata.html#checkSchemaAgreement--) +* [withMaxSchemaAgreementWaitSeconds(int)](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withMaxSchemaAgreementWaitSeconds-int-) +* [isSchemaInAgreement()](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html#isSchemaInAgreement--) +* [checkSchemaAgreement()](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#checkSchemaAgreement--) ### Token metadata @@ -181,14 +181,14 @@ Starting with Cassandra 2.1.5, this information is available in a system table (see [CASSANDRA-7688](https://issues.apache.org/jira/browse/CASSANDRA-7688)). 
-[metadata]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metadata.html -[getTokenRanges]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metadata.html#getTokenRanges-- -[getTokenRanges2]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metadata.html#getTokenRanges-java.lang.String-com.datastax.driver.core.Host- -[getReplicas]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metadata.html#getReplicas-java.lang.String-com.datastax.driver.core.TokenRange- -[newToken]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metadata.html#newToken-java.lang.String- -[newTokenRange]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metadata.html#newTokenRange-com.datastax.driver.core.Token-com.datastax.driver.core.Token- -[TokenRange]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TokenRange.html -[getTokens]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Host.html#getTokens-- -[setToken]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/BoundStatement.html#setToken-int-com.datastax.driver.core.Token- -[getToken]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Row.html#getToken-int- -[getPKToken]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Row.html#getPartitionKeyToken-- +[metadata]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html +[getTokenRanges]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getTokenRanges-- +[getTokenRanges2]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getTokenRanges-java.lang.String-com.datastax.driver.core.Host- +[getReplicas]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#getReplicas-java.lang.String-com.datastax.driver.core.TokenRange- +[newToken]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#newToken-java.lang.String- +[newTokenRange]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metadata.html#newTokenRange-com.datastax.driver.core.Token-com.datastax.driver.core.Token- +[TokenRange]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TokenRange.html +[getTokens]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Host.html#getTokens-- +[setToken]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BoundStatement.html#setToken-int-com.datastax.driver.core.Token- +[getToken]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Row.html#getToken-int- +[getPKToken]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Row.html#getPartitionKeyToken-- diff --git a/manual/metrics/README.md b/manual/metrics/README.md index 6326ef9c091..c086a271b9e 100644 --- a/manual/metrics/README.md +++ b/manual/metrics/README.md @@ -38,7 +38,7 @@ To do this in a maven project: com.scylladb scylla-driver-core - 3.10.2.0 + 3.11.2.0 io.dropwizard.metrics @@ -146,8 +146,8 @@ reporter.start(); [Reporters]: http://metrics.dropwizard.io/3.2.2/manual/core.html#reporters [MetricRegistry]: http://metrics.dropwizard.io/3.2.2/apidocs/com/codahale/metrics/MetricRegistry.html [MXBeans]: https://docs.oracle.com/javase/tutorial/jmx/mbeans/mxbeans.html -[withClusterName]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Builder.html#withClusterName-java.lang.String- -[withoutMetrics]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Builder.html#withoutMetrics-- -[withoutJMXReporting]: 
https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Builder.html#withoutJMXReporting-- -[getMetrics]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.html#getMetrics-- -[Metrics]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metrics.html +[withClusterName]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withClusterName-java.lang.String- +[withoutMetrics]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withoutMetrics-- +[withoutJMXReporting]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withoutJMXReporting-- +[getMetrics]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.html#getMetrics-- +[Metrics]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metrics.html diff --git a/manual/native_protocol/README.md b/manual/native_protocol/README.md index e07ff9fe8bd..d2ad6212847 100644 --- a/manual/native_protocol/README.md +++ b/manual/native_protocol/README.md @@ -16,17 +16,23 @@ Cassandra when the first connection is established. Both sides are backward-compatible with older versions: - - - - - + + + + + + + + + + +
Driver VersionCassandra: 1.2.x
(DSE 3.2)
2.0.x
(DSE 4.0 to 4.6) -
2.1.x
(DSE 4.7)
2.2.x3.0.x & 3.x
(DSE 5.0+)
1.0.x v1 v1 v1 v1 Unsupported (1)
2.0.x to 2.1.1 v1 v2 v2 v2 Unsupported (1)
2.1.2 to 2.1.x v1 v2 v3 v3 Unsupported (2)
3.x v1 v2 v3 v4 v4
Driver VersionCassandra 1.2.x
(DSE 3.2)
Cassandra 2.0.x
(DSE 4.0 to 4.6)
Cassandra 2.1.x
(DSE 4.7)
Cassandra 2.2.xCassandra 3.0.x & 3.x
(DSE 5.0+)
Cassandra 4.0+
1.0.x v1 v1 v1 v1 Unsupported (1)Unsupported (1)
2.0.x to 2.1.1 v1 v2 v2 v2Unsupported (1) Unsupported (1)
2.1.2 to 2.1.x v1 v2 v3 v3Unsupported (2)Unsupported (2)
3.x v1 v2 v3 v4 v4 v5
-*(1) Cassandra 3.0 does not support protocol versions v1 and v2* +*(1) Cassandra 3.0+ does not support protocol versions v1 and v2* -*(2) There is a matching protocol version (v3), but the driver 2.1.x can't read the new system table format of Cassandra 3.0* +*(2) There is a matching protocol version (v3), but the driver 2.1.x can't read the new system table +format of Cassandra 3.0+* For example, if you use version 2.1.5 of the driver to connect to Cassandra 2.0.9, the maximum version you can use (and the one you'll get @@ -64,7 +70,7 @@ All host(s) tried for query failed [/127.0.0.1:9042] Host /127.0.0.1:9042 does not support protocol version V3 but V2)) ``` -[gpv]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ProtocolOptions.html#getProtocolVersion-- +[gpv]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ProtocolOptions.html#getProtocolVersion-- #### Protocol version with mixed clusters @@ -95,19 +101,19 @@ To avoid this issue, you can use one the following workarounds: #### v1 to v2 * bound variables in simple statements - ([Session#execute(String, Object...)](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.html#execute-java.lang.String-java.lang.Object...-)) -* [batch statements](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/BatchStatement.html) + ([Session#execute(String, Object...)](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#execute-java.lang.String-java.lang.Object...-)) +* [batch statements](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.html) * [query paging](../paging/) #### v2 to v3 * the number of stream ids per connection goes from 128 to 32768 (see [Connection pooling](../pooling/)) -* [serial consistency on batch 
statements](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/BatchStatement.html#setSerialConsistencyLevel-com.datastax.driver.core.ConsistencyLevel-) +* [serial consistency on batch statements](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.html#setSerialConsistencyLevel-com.datastax.driver.core.ConsistencyLevel-) * [client-side timestamps](../query_timestamps/) #### v3 to v4 -* [query warnings](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ExecutionInfo.html#getWarnings--) +* [query warnings](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html#getWarnings--) * allowed unset values in bound statements * [Custom payloads](../custom_payloads/) diff --git a/manual/object_mapper/README.md b/manual/object_mapper/README.md index 0cba257b993..e1e518aaea7 100644 --- a/manual/object_mapper/README.md +++ b/manual/object_mapper/README.md @@ -11,7 +11,7 @@ The mapper is published as a separate Maven artifact: com.scylladb scylla-driver-mapping - 3.10.2.0 + 3.11.2.0 ``` diff --git a/manual/object_mapper/creating/README.md b/manual/object_mapper/creating/README.md index dbd5fc5b826..c91b2a9c92e 100644 --- a/manual/object_mapper/creating/README.md +++ b/manual/object_mapper/creating/README.md @@ -149,9 +149,9 @@ User user = new User() .setName("John Doe"); ``` -[table]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Table.html +[table]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Table.html [case-sensitive]:http://docs.datastax.com/en/cql/3.3/cql/cql_reference/ucase-lcase_r.html -[consistency level]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ConsistencyLevel.html +[consistency level]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ConsistencyLevel.html 
[java-beans]:https://docs.oracle.com/javase/tutorial/javabeans/writing/properties.html [set-accessible]:https://docs.oracle.com/javase/8/docs/api/java/lang/reflect/AccessibleObject.html#setAccessible-boolean- @@ -189,7 +189,7 @@ CREATE TABLE users(id uuid PRIMARY KEY, "userName" text); private String userName; ``` -[column]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Column.html +[column]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Column.html #### Primary key fields @@ -213,8 +213,8 @@ private String areaCode; The order of the indices must match that of the columns in the table declaration. -[pk]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/PartitionKey.html -[cc]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/ClusteringColumn.html +[pk]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/PartitionKey.html +[cc]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/ClusteringColumn.html [pks]:http://thelastpickle.com/blog/2013/01/11/primary-keys-in-cql.html #### Computed fields @@ -250,7 +250,7 @@ version (see [JAVA-832](https://datastax-oss.atlassian.net/browse/JAVA-832)). [User Defined Functions]:http://www.planetcassandra.org/blog/user-defined-functions-in-cassandra-3-0/ -[computed]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Computed.html +[computed]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Computed.html #### Transient properties @@ -259,7 +259,7 @@ to table columns. [@Transient][transient] can be used to prevent a field or a Java bean property from being mapped. Like other column-level annotations, it should be placed on either the field declaration or the property getter method. 
-[transient]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Transient.html +[transient]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Transient.html ### Mapping User Types @@ -322,8 +322,8 @@ This also works with UDTs inside collections or other UDTs, with any arbitrary nesting level. [User Defined Types]: ../../udts/ -[udt]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/UDT.html -[field]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Field.html +[udt]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/UDT.html +[field]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Field.html ### Mapping collections @@ -381,10 +381,10 @@ to figure out how to appropriately handle UDT conversion, i.e.: mappingManager.udtCodec(Address.class); ``` -[frozen]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Frozen.html -[frozenkey]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/FrozenKey.html -[frozenvalue]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/FrozenValue.html -[udtCodec]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/MappingManager.html#udtCodec-java.lang.Class- +[frozen]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Frozen.html +[frozenkey]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/FrozenKey.html +[frozenvalue]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/FrozenValue.html +[udtCodec]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/MappingManager.html#udtCodec-java.lang.Class- #### Prefer Frozen Collections diff --git 
a/manual/object_mapper/custom_codecs/README.md b/manual/object_mapper/custom_codecs/README.md index c68f59ba67f..821e1f619e4 100644 --- a/manual/object_mapper/custom_codecs/README.md +++ b/manual/object_mapper/custom_codecs/README.md @@ -98,9 +98,9 @@ instance (one per column) and cache it for future use. This also works with [@Field][field] and [@Param][param] annotations. -[column]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Column.html -[field]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Field.html -[param]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Param.html +[column]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Column.html +[field]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Field.html +[param]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Param.html ## Implicit UDT codecs diff --git a/manual/object_mapper/using/README.md b/manual/object_mapper/using/README.md index f6367ecf203..4587f983d77 100644 --- a/manual/object_mapper/using/README.md +++ b/manual/object_mapper/using/README.md @@ -28,9 +28,9 @@ Mapper mapper = manager.mapper(User.class); calling `manager#mapper` more than once for the same class will return the previously generated mapper. 
-[Mapper]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/Mapper.html -[MappingManager]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/MappingManager.html -[Session]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.html +[Mapper]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/Mapper.html +[MappingManager]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/MappingManager.html +[Session]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html #### Basic CRUD operations @@ -179,7 +179,7 @@ It provides methods `one()`, `all()`, `iterator()`, `getExecutionInfo()` and `isExhausted()`. Note that iterating the `Result` will consume the `ResultSet`, and vice-versa. -[Result]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/Result.html +[Result]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/Result.html ### Accessors @@ -229,7 +229,7 @@ corresponds to which marker: ResultSet insert(@Param("u") UUID userId, @Param("n") String name); ``` -[param]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/Param.html +[param]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/Param.html If a method argument is a Java enumeration, it must be annotated with `@Enumerated` to indicate how to convert it to a CQL type (the rules are @@ -301,7 +301,7 @@ query with the annotation [@QueryParameters]. 
Then, options like public ListenableFuture> getAllAsync(); ``` -[@QueryParameters]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/annotations/QueryParameters.html +[@QueryParameters]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/annotations/QueryParameters.html ### Mapping configuration @@ -345,6 +345,6 @@ PropertyMapper propertyMapper = new DefaultPropertyMapper() There is more to `DefaultPropertyMapper`; see the Javadocs and implementation for details. -[MappingConfiguration]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/MappingConfiguration.html -[PropertyMapper]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/PropertyMapper.html -[DefaultPropertyMapper]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/mapping/DefaultPropertyMapper.html +[MappingConfiguration]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/MappingConfiguration.html +[PropertyMapper]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/PropertyMapper.html +[DefaultPropertyMapper]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/mapping/DefaultPropertyMapper.html diff --git a/manual/paging/README.md b/manual/paging/README.md index d3628cbfaa8..15343d3c7f0 100644 --- a/manual/paging/README.md +++ b/manual/paging/README.md @@ -176,8 +176,8 @@ if (nextPage != null) { } ``` -[result_set]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ResultSet.html -[paging_state]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PagingState.html +[result_set]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSet.html +[paging_state]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PagingState.html Due to internal implementation details, `PagingState` instances are not @@ -239,8 +239,8 @@ There are two situations where 
you might want to use the unsafe API: implementing your own validation logic (for example, signing the raw state with a private key). -[gpsu]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ExecutionInfo.html#getPagingStateUnsafe-- -[spsu]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Statement.html#setPagingStateUnsafe-byte:A- +[gpsu]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ExecutionInfo.html#getPagingStateUnsafe-- +[spsu]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html#setPagingStateUnsafe-byte:A- ### Offset queries diff --git a/manual/pooling/README.md b/manual/pooling/README.md index ab13653b481..3b2e8b0218b 100644 --- a/manual/pooling/README.md +++ b/manual/pooling/README.md @@ -285,16 +285,16 @@ either: [newConnectionThreshold][nct] so that enough connections are added by the time you reach the bottleneck. -[result_set_future]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ResultSetFuture.html -[pooling_options]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PoolingOptions.html -[lbp]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/LoadBalancingPolicy.html -[nct]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PoolingOptions.html#setNewConnectionThreshold-com.datastax.driver.core.HostDistance-int- -[mrpc]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PoolingOptions.html#setMaxRequestsPerConnection-com.datastax.driver.core.HostDistance-int- -[sits]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PoolingOptions.html#setIdleTimeoutSeconds-int- -[rtm]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html#getReadTimeoutMillis-- -[smqs]: 
https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PoolingOptions.html#setMaxQueueSize-int- -[sptm]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PoolingOptions.html#setPoolTimeoutMillis-int- -[nhae]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/NoHostAvailableException.html -[getErrors]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/NoHostAvailableException.html#getErrors-- -[get_state]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.html#getState-- -[BusyPoolException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/BusyPoolException.html +[result_set_future]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ResultSetFuture.html +[pooling_options]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html +[lbp]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LoadBalancingPolicy.html +[nct]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setNewConnectionThreshold-com.datastax.driver.core.HostDistance-int- +[mrpc]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setMaxRequestsPerConnection-com.datastax.driver.core.HostDistance-int- +[sits]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setIdleTimeoutSeconds-int- +[rtm]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#getReadTimeoutMillis-- +[smqs]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setMaxQueueSize-int- +[sptm]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PoolingOptions.html#setPoolTimeoutMillis-int- +[nhae]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html +[getErrors]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html#getErrors-- +[get_state]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#getState-- +[BusyPoolException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/BusyPoolException.html diff --git a/manual/query_timestamps/README.md b/manual/query_timestamps/README.md index 20f4e9c6a47..fb24690ad95 100644 --- a/manual/query_timestamps/README.md +++ b/manual/query_timestamps/README.md @@ -140,10 +140,10 @@ following: Steps 2 and 3 only apply if native protocol v3 or above is in use. -[TimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TimestampGenerator.html -[AtomicMonotonicTimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.html -[ThreadLocalMonotonicTimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.html -[ServerSideTimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ServerSideTimestampGenerator.html +[TimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TimestampGenerator.html +[AtomicMonotonicTimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/AtomicMonotonicTimestampGenerator.html +[ThreadLocalMonotonicTimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ThreadLocalMonotonicTimestampGenerator.html +[ServerSideTimestampGenerator]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ServerSideTimestampGenerator.html [gettimeofday]: http://man7.org/linux/man-pages/man2/settimeofday.2.html 
[JNR]: https://github.com/jnr/jnr-ffi diff --git a/manual/reconnection/README.md b/manual/reconnection/README.md index f99be3ef99f..a3a4733f0b4 100644 --- a/manual/reconnection/README.md +++ b/manual/reconnection/README.md @@ -29,7 +29,7 @@ You can also write your own policy; it must implement [ReconnectionPolicy]. For best results, use reasonable values: very low values (for example a constant delay of 10 milliseconds) will quickly saturate your system. -[ReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/ReconnectionPolicy.html -[Cluster.Builder.withReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Cluster.Builder.html#withReconnectionPolicy-com.datastax.driver.core.policies.ReconnectionPolicy- -[ExponentialReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.html -[ConstantReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/ConstantReconnectionPolicy.html +[ReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/ReconnectionPolicy.html +[Cluster.Builder.withReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Cluster.Builder.html#withReconnectionPolicy-com.datastax.driver.core.policies.ReconnectionPolicy- +[ExponentialReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.html +[ConstantReconnectionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/ConstantReconnectionPolicy.html diff --git a/manual/retries/README.md b/manual/retries/README.md index 231707022c9..1770cd71cf9 100644 --- a/manual/retries/README.md +++ b/manual/retries/README.md @@ -146,33 +146,33 @@ implementations to handle idempotence (the new behavior is equivalent to what yo 
`IdempotenceAwareRetryPolicy` before). -[RetryDecision]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html -[retry()]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#retry-com.datastax.driver.core.ConsistencyLevel- -[tryNextHost()]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#tryNextHost-com.datastax.driver.core.ConsistencyLevel- -[rethrow()]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#rethrow-- -[ignore()]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#ignore-- -[NoHostAvailableException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/NoHostAvailableException.html -[getErrors()]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/NoHostAvailableException.html#getErrors-- -[RetryPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.html -[DefaultRetryPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/DefaultRetryPolicy.html -[onReadTimeout]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onReadTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-boolean-int- -[onWriteTimeout]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onWriteTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.WriteType-int-int-int- -[onUnavailable]: 
https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onUnavailable-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-int- -[onRequestError]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onRequestError-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.exceptions.DriverException-int- -[UnavailableException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/UnavailableException.html -[ReadTimeoutException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/ReadTimeoutException.html -[WriteTimeoutException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/WriteTimeoutException.html -[OverloadedException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/OverloadedException.html -[ServerError]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/ServerError.html -[OperationTimedOutException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/OperationTimedOutException.html -[ConnectionException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/ConnectionException.html -[QueryValidationException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/QueryValidationException.html -[InvalidQueryException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/InvalidQueryException.html -[InvalidConfigurationInQueryException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.html -[UnauthorizedException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/UnauthorizedException.html 
-[SyntaxError]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/SyntaxError.html -[AlreadyExistsException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/AlreadyExistsException.html -[TruncateException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/TruncateException.html +[RetryDecision]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html +[retry()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#retry-com.datastax.driver.core.ConsistencyLevel- +[tryNextHost()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#tryNextHost-com.datastax.driver.core.ConsistencyLevel- +[rethrow()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#rethrow-- +[ignore()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.RetryDecision.html#ignore-- +[NoHostAvailableException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html +[getErrors()]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/NoHostAvailableException.html#getErrors-- +[RetryPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html +[DefaultRetryPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html +[onReadTimeout]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onReadTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-boolean-int- +[onWriteTimeout]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onWriteTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.WriteType-int-int-int- +[onUnavailable]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onUnavailable-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-int- +[onRequestError]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/DefaultRetryPolicy.html#onRequestError-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.exceptions.DriverException-int- +[UnavailableException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/UnavailableException.html +[ReadTimeoutException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/ReadTimeoutException.html +[WriteTimeoutException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/WriteTimeoutException.html +[OverloadedException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/OverloadedException.html +[ServerError]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/ServerError.html +[OperationTimedOutException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/OperationTimedOutException.html +[ConnectionException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/ConnectionException.html +[QueryValidationException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/QueryValidationException.html +[InvalidQueryException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/InvalidQueryException.html +[InvalidConfigurationInQueryException]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.html +[UnauthorizedException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/UnauthorizedException.html +[SyntaxError]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/SyntaxError.html +[AlreadyExistsException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/AlreadyExistsException.html +[TruncateException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/TruncateException.html [query plan]: ../load_balancing/#query-plan [connection pool]: ../pooling/ diff --git a/manual/shaded_jar/README.md b/manual/shaded_jar/README.md index 1661c85d181..2ce9d018341 100644 --- a/manual/shaded_jar/README.md +++ b/manual/shaded_jar/README.md @@ -12,7 +12,7 @@ package name: com.scylladb scylla-driver-core - 3.10.2.0 + 3.11.2.0 shaded @@ -32,7 +32,7 @@ non-shaded JAR: com.scylladb scylla-driver-core - 3.10.2.0 + 3.11.2.0 shaded @@ -44,7 +44,7 @@ non-shaded JAR: com.scylladb scylla-driver-mapping - 3.10.2.0 + 3.11.2.0 com.scylladb @@ -74,5 +74,5 @@ detects that shaded Netty classes are being used: Detected shaded Netty classes in the classpath; native epoll transport will not work properly, defaulting to NIO. -[NettyOptions]:https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/NettyOptions.html +[NettyOptions]:https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/NettyOptions.html [Netty native transports]:http://netty.io/wiki/native-transports.html diff --git a/manual/socket_options/README.md b/manual/socket_options/README.md index 3edd9a74718..4b79fd684dc 100644 --- a/manual/socket_options/README.md +++ b/manual/socket_options/README.md @@ -117,15 +117,15 @@ To clarify: We might rename `SocketOptions.setReadTimeoutMillis` in a future version to clear up any confusion. 
-[SocketOptions]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html -[setReadTimeoutMillis]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html#setReadTimeoutMillis-int- -[setConnectTimeoutMillis]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html#setConnectTimeoutMillis-int- -[setKeepAlive]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html#setKeepAlive-boolean- -[setReceiveBufferSize]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html#setReceiveBufferSize-int- -[setReuseAddress]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html#setReuseAddress-boolean- -[setSendBufferSize]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html#setSendBufferSize-int- -[setSoLinger]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html#setSoLinger-int- -[setTcpNoDelay]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SocketOptions.html#setTcpNoDelay-boolean- -[onReadTimeout]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.html#onReadTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-boolean-int- -[onRequestError]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.html#onRequestError-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.exceptions.DriverException-int- -[OperationTimedOutException]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/exceptions/OperationTimedOutException.html \ No newline at end of file +[SocketOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html 
+[setReadTimeoutMillis]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setReadTimeoutMillis-int- +[setConnectTimeoutMillis]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setConnectTimeoutMillis-int- +[setKeepAlive]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setKeepAlive-boolean- +[setReceiveBufferSize]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setReceiveBufferSize-int- +[setReuseAddress]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setReuseAddress-boolean- +[setSendBufferSize]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setSendBufferSize-int- +[setSoLinger]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setSoLinger-int- +[setTcpNoDelay]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SocketOptions.html#setTcpNoDelay-boolean- +[onReadTimeout]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html#onReadTimeout-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-int-int-boolean-int- +[onRequestError]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html#onRequestError-com.datastax.driver.core.Statement-com.datastax.driver.core.ConsistencyLevel-com.datastax.driver.core.exceptions.DriverException-int- +[OperationTimedOutException]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/exceptions/OperationTimedOutException.html \ No newline at end of file diff --git a/manual/speculative_execution/README.md b/manual/speculative_execution/README.md index 3ef65f60242..7a1fe5b4007 100644 --- a/manual/speculative_execution/README.md +++ b/manual/speculative_execution/README.md @@ -73,7 +73,7 @@ 
Speculative executions are controlled by an instance of `Cluster`. This policy defines the threshold after which a new speculative execution will be triggered. -[SpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.html +[SpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.html Two implementations are provided with the driver: @@ -101,7 +101,7 @@ way: * if no response has been received at t0 + 1000 milliseconds, start another speculative execution on a third node. -[ConstantSpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.html +[ConstantSpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/ConstantSpeculativeExecutionPolicy.html #### [PercentileSpeculativeExecutionPolicy] @@ -160,10 +160,10 @@ Note that `PercentileTracker` may also be used with a slow query logger (see the [Logging](../logging/) section). In that case, you would create a single tracker object and share it with both components. 
-[PercentileSpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.html -[PercentileTracker]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PercentileTracker.html -[ClusterWidePercentileTracker]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ClusterWidePercentileTracker.html -[PerHostPercentileTracker]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PerHostPercentileTracker.html +[PercentileSpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/PercentileSpeculativeExecutionPolicy.html +[PercentileTracker]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PercentileTracker.html +[ClusterWidePercentileTracker]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ClusterWidePercentileTracker.html +[PerHostPercentileTracker]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PerHostPercentileTracker.html [hdr]: http://hdrhistogram.github.io/HdrHistogram/ #### Using your own @@ -210,7 +210,7 @@ client driver exec1 exec2 The only impact is that all executions of the same query always share the same query plan, so each host will be used by at most one execution. -[retry_policy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.html +[retry_policy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html ### Tuning and practical details @@ -225,8 +225,8 @@ You can monitor how many speculative executions were triggered with the It should only be a few percents of the total number of requests ([cluster.getMetrics().getRequestsTimer().getCount()][request_metric]). 
-[se_metric]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metrics.Errors.html#getSpeculativeExecutions-- -[request_metric]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Metrics.html#getRequestsTimer-- +[se_metric]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metrics.Errors.html#getSpeculativeExecutions-- +[request_metric]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Metrics.html#getRequestsTimer-- #### Stream id exhaustion @@ -255,8 +255,8 @@ sustained. If you're unsure of which native protocol version you're using, you can check with [cluster.getConfiguration().getProtocolOptions().getProtocolVersion()][protocol_version]. -[session_state]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.State.html -[protocol_version]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/ProtocolOptions.html#getProtocolVersion-- +[session_state]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.State.html +[protocol_version]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/ProtocolOptions.html#getProtocolVersion-- #### Request ordering and client timestamps diff --git a/manual/ssl/README.md b/manual/ssl/README.md index 9fb628bacd5..47dbd5f9863 100644 --- a/manual/ssl/README.md +++ b/manual/ssl/README.md @@ -207,8 +207,8 @@ Cluster cluster = Cluster.builder() .build(); ``` -[RemoteEndpointAwareSSLOptions]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/RemoteEndpointAwareSSLOptions.html -[RemoteEndpointAwareJdkSSLOptions]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.html -[newSSLEngine]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.html#newSSLEngine-io.netty.channel.socket.SocketChannel-java.net.InetSocketAddress- 
-[RemoteEndpointAwareNettySSLOptions]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/RemoteEndpointAwareNettySSLOptions.html -[NettyOptions]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/NettyOptions.html +[RemoteEndpointAwareSSLOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/RemoteEndpointAwareSSLOptions.html +[RemoteEndpointAwareJdkSSLOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.html +[newSSLEngine]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/RemoteEndpointAwareJdkSSLOptions.html#newSSLEngine-io.netty.channel.socket.SocketChannel-java.net.InetSocketAddress- +[RemoteEndpointAwareNettySSLOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/RemoteEndpointAwareNettySSLOptions.html +[NettyOptions]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/NettyOptions.html diff --git a/manual/statements/README.md b/manual/statements/README.md index 9a941c30128..7066a73480d 100644 --- a/manual/statements/README.md +++ b/manual/statements/README.md @@ -32,14 +32,14 @@ If you use custom policies ([RetryPolicy], [LoadBalancingPolicy], properties that influence statement execution. To achieve this, you can wrap your statements in a custom [StatementWrapper] implementation. 
-[Statement]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Statement.html -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/querybuilder/QueryBuilder.html -[StatementWrapper]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/StatementWrapper.html -[RetryPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/RetryPolicy.html -[LoadBalancingPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/LoadBalancingPolicy.html -[SpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.html -[execute]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.html#execute-com.datastax.driver.core.Statement- -[executeAsync]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.html#executeAsync-com.datastax.driver.core.Statement- +[Statement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Statement.html +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html +[StatementWrapper]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/StatementWrapper.html +[RetryPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/RetryPolicy.html +[LoadBalancingPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/LoadBalancingPolicy.html +[SpeculativeExecutionPolicy]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/policies/SpeculativeExecutionPolicy.html +[execute]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#execute-com.datastax.driver.core.Statement- +[executeAsync]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#executeAsync-com.datastax.driver.core.Statement- ```eval_rst .. toctree:: diff --git a/manual/statements/batch/README.md b/manual/statements/batch/README.md index e6db824424a..d54d79589d0 100644 --- a/manual/statements/batch/README.md +++ b/manual/statements/batch/README.md @@ -34,9 +34,9 @@ In addition, simple statements with named parameters are currently not supported due to a [protocol limitation][CASSANDRA-10246] that will be fixed in a future version). If you try to execute such a batch, an `IllegalArgumentException` is thrown. -[BatchStatement]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/BatchStatement.html +[BatchStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.html [batch_dse]: http://docs.datastax.com/en/dse/5.1/cql/cql/cql_using/useBatch.html -[LOGGED]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/BatchStatement.Type.html#LOGGED -[UNLOGGED]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/BatchStatement.Type.html#UNLOGGED +[LOGGED]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.Type.html#LOGGED +[UNLOGGED]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BatchStatement.Type.html#UNLOGGED [batch_size_fail_threshold]: https://docs.datastax.com/en/cassandra/3.x/cassandra/configuration/configCassandra_yaml.html#configCassandra_yaml__batch_size_fail_threshold_in_kb [CASSANDRA-10246]: https://issues.apache.org/jira/browse/CASSANDRA-10246 diff --git a/manual/statements/built/README.md b/manual/statements/built/README.md index d54f6e30faa..eb1a51cb5cb 100644 --- a/manual/statements/built/README.md +++ b/manual/statements/built/README.md @@ -76,7 +76,7 @@ BuiltStatement ttlAndWriteTime = QueryBuilder.select().column("id").column("t") ``` You can also cast the value of the given column to 
another type by using the `cast` function, -[specifying](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/querybuilder/QueryBuilder.html#cast-java.lang.Object-com.datastax.driver.core.DataType-) +[specifying](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html#cast-java.lang.Object-com.datastax.driver.core.DataType-) the column for which it should be performed, and to what type it should be casted. #### Specifying conditions @@ -92,7 +92,7 @@ BuiltStatement selectOne = QueryBuilder.select().from("test", "test") The `where` function accepts the `Clause` object that is generated by calling `QueryBuilder`'s -[functions](https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/querybuilder/QueryBuilder.html#eq-java.lang.Iterable-java.lang.Iterable-), +[functions](https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html#eq-java.lang.Iterable-java.lang.Iterable-), such as, `eq`, `ne`, `lt`, `in`, `contains`, `notNull`, etc. In most cases, these functions receive 2 arguments: the name of the column, and the value to compare, but there are also variants that receive 2 iterables for columns and values correspondingly. 
@@ -263,6 +263,6 @@ Note: the call to these functions changes the object type from `BuiltStatement` -[QueryBuilder]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/querybuilder/QueryBuilder.html -[TableMetadata]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TableMetadata.html -[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/schemabuilder/SchemaBuilder.html +[QueryBuilder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/querybuilder/QueryBuilder.html +[TableMetadata]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TableMetadata.html +[SchemaBuilder]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/schemabuilder/SchemaBuilder.html diff --git a/manual/statements/prepared/README.md b/manual/statements/prepared/README.md index d9dd7fee278..ff1dca78057 100644 --- a/manual/statements/prepared/README.md +++ b/manual/statements/prepared/README.md @@ -256,11 +256,11 @@ relying on `SELECT *`. This will be addressed in a future release of both Cassandra and the driver. Follow [CASSANDRA-10786] and [JAVA-1196] for more information. 
-[PreparedStatement]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/PreparedStatement.html -[BoundStatement]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/BoundStatement.html -[setPrepareOnAllHosts]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/QueryOptions.html#setPrepareOnAllHosts-boolean- -[setReprepareOnUp]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/QueryOptions.html#setReprepareOnUp-boolean- -[execute]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.html#execute-com.datastax.driver.core.Statement- -[executeAsync]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/Session.html#executeAsync-com.datastax.driver.core.Statement- +[PreparedStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/PreparedStatement.html +[BoundStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/BoundStatement.html +[setPrepareOnAllHosts]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html#setPrepareOnAllHosts-boolean- +[setReprepareOnUp]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/QueryOptions.html#setReprepareOnUp-boolean- +[execute]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#execute-com.datastax.driver.core.Statement- +[executeAsync]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/Session.html#executeAsync-com.datastax.driver.core.Statement- [CASSANDRA-10786]: https://issues.apache.org/jira/browse/CASSANDRA-10786 [JAVA-1196]: https://datastax-oss.atlassian.net/browse/JAVA-1196 diff --git a/manual/statements/simple/README.md b/manual/statements/simple/README.md index 1144ba64c83..b08adc7b018 100644 --- a/manual/statements/simple/README.md +++ b/manual/statements/simple/README.md @@ -128,4 +128,4 @@ session.execute( 1, bytes); ``` 
-[SimpleStatement]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/SimpleStatement.html +[SimpleStatement]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/SimpleStatement.html diff --git a/manual/tuples/README.md b/manual/tuples/README.md index b8842082c99..f0b9f0061b8 100644 --- a/manual/tuples/README.md +++ b/manual/tuples/README.md @@ -96,7 +96,7 @@ bs.setList("l", Arrays.asList(oneTimeUsageTuple.newValue("1", "1"), oneTimeUsage session.execute(bs); ``` -[TupleType]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TupleType.html -[TupleValue]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TupleValue.html -[newValueVararg]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TupleType.html#newValue-java.lang.Object...- -[newValue]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/TupleType.html#newValue-- +[TupleType]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleType.html +[TupleValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleValue.html +[newValueVararg]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleType.html#newValue-java.lang.Object...- +[newValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/TupleType.html#newValue-- diff --git a/manual/udts/README.md b/manual/udts/README.md index f2f4d514496..892a5a95b1b 100644 --- a/manual/udts/README.md +++ b/manual/udts/README.md @@ -95,5 +95,5 @@ session.execute(bs); [cql_doc]: https://docs.datastax.com/en/cql/3.3/cql/cql_reference/cqlRefUDType.html -[UDTValue]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/UDTValue.html -[UserType]: https://docs.datastax.com/en/drivers/java/3.10/com/datastax/driver/core/UserType.html +[UDTValue]: https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/UDTValue.html +[UserType]: 
https://docs.datastax.com/en/drivers/java/3.11/com/datastax/driver/core/UserType.html diff --git a/pom.xml b/pom.xml index f934899c056..307e2df8864 100644 --- a/pom.xml +++ b/pom.xml @@ -25,7 +25,7 @@ com.scylladb scylla-driver-parent - 3.10.2.0-SNAPSHOT + 3.11.2.1-SNAPSHOT pom Java Driver for Scylla and Apache Cassandra @@ -55,15 +55,15 @@ 1.7.25 1.7.25 19.0 - 4.0.56.Final - 2.0.7.Final + 4.1.75.Final + 2.0.50.Final 3.2.2 1.1.2.6 1.4.1 2.1.10 2.8.11 - 2.7.9.3 + 2.7.9.7 2.9.9 1.0 1.0.4 @@ -92,6 +92,9 @@ unit true + + fedora @@ -664,7 +667,7 @@ - 3.10.1 + 3.11.0.1 ../clirr-ignores.xml com/datastax/shaded/** @@ -732,7 +735,7 @@ maven-surefire-plugin - 2.18 + 3.0.0-M6 ${test.groups} false @@ -951,7 +954,7 @@ short - unit,short + short false @@ -959,7 +962,7 @@ long - unit,short,long + long false @@ -967,7 +970,7 @@ duration - unit,short,long,duration + duration false @@ -975,7 +978,7 @@ doc - unit,doc + doc diff --git a/testing/README.md b/testing/README.md index 4dfbb525351..3b378d3a99c 100644 --- a/testing/README.md +++ b/testing/README.md @@ -2,7 +2,7 @@ ### Install CCM - pip install ccm + pip3 install https://github.com/scylladb/scylla-ccm/archive/master.zip ### Setup CCM Loopbacks (required for OSX)