diff --git a/.build.linux.sh b/.build.linux.sh
index 7387ce3a6..80529e00d 100644
--- a/.build.linux.sh
+++ b/.build.linux.sh
@@ -54,6 +54,10 @@ install_openssl() {
true # Already installed on image
}
+install_zlib() {
+ true # Already installed on image
+}
+
install_driver() {(
cd packaging
diff --git a/.build.osx.sh b/.build.osx.sh
index 196aa8260..e88d8dfe4 100644
--- a/.build.osx.sh
+++ b/.build.osx.sh
@@ -39,6 +39,16 @@ install_openssl() {
fi
}
+install_zlib() {
+ if brew ls --versions zlib > /dev/null; then
+ if ! brew outdated zlib; then
+ brew upgrade zlib
+ fi
+ else
+ brew install zlib
+ fi
+}
+
install_driver() {
true
}
diff --git a/.build.sh b/.build.sh
index 1f9c3fcdb..8c22501a0 100644
--- a/.build.sh
+++ b/.build.sh
@@ -35,8 +35,8 @@ else
fi
get_driver_version() {
- local header_file=$1
- local driver_prefix=$2
+ local header_file=${1}
+ local driver_prefix=${2}
local driver_version=$(grep "#define[ \t]\+${driver_prefix}_VERSION_\(MAJOR\|MINOR\|PATCH\|SUFFIX\)" ${header_file} | awk '
BEGIN { major="?"; minor="?"; patch="?" }
/_VERSION_MAJOR/ { major=$3 }
@@ -61,10 +61,11 @@ get_driver_version() {
install_dependencies() {
install_libuv
install_openssl
+ install_zlib
}
build_driver() {
- local driver_prefix=$1
+ local driver_prefix=${1}
# Ensure build directory is cleaned (static nodes are not cleaned)
[[ -d build ]] && rm -rf build
@@ -72,7 +73,17 @@ build_driver() {
(
cd build
- cmake -DCMAKE_BUILD_TYPE=Release -D${driver_prefix}_BUILD_SHARED=On -D${driver_prefix}_BUILD_STATIC=On -D${driver_prefix}_BUILD_EXAMPLES=On -D${driver_prefix}_BUILD_UNIT_TESTS=On ..
+ BUILD_INTEGRATION_TESTS=Off
+ if [ "${CI_INTEGRATION_ENABLED}" == "true" ]; then
+ BUILD_INTEGRATION_TESTS=On
+ fi
+ cmake -DCMAKE_BUILD_TYPE=Release \
+ -D${driver_prefix}_BUILD_SHARED=On \
+ -D${driver_prefix}_BUILD_STATIC=On \
+ -D${driver_prefix}_BUILD_EXAMPLES=On \
+ -D${driver_prefix}_BUILD_UNIT_TESTS=On \
+ -D${driver_prefix}_BUILD_INTEGRATION_TESTS=${BUILD_INTEGRATION_TESTS} \
+ ..
[[ -x $(which clang-format) ]] && make format-check
make -j${PROCS}
)
@@ -80,7 +91,7 @@ build_driver() {
check_driver_exports() {(
set +e #Disable fail fast for this subshell
- local driver_library=$1
+ local driver_library=${1}
if [ -f ${driver_library} ]; then
declare -a MISSING_FUNCTIONS
for function in "${@:2}"; do
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6482461eb..09b45c3d7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,61 @@
+2.14.0
+===========
+
+Bug Fixes
+--------
+* [CPP-819] - Ensure port is updated on already assigned contact points
+* [CPP-825] - Cloud should be verifying the peer certificate's CN
+
+2.14.0-alpha2
+===========
+
+Features
+--------
+* [CPP-812] - Enable warnings for implicit casts and fix problems
+* [CPP-813] - Detect CaaS and change consistency default
+* [CPP-817] - Provide error if mixed usage of secure connect bundle and contact points/ssl context
+
+Bug Fixes
+--------
+* [CPP-802] - Handle prepared id mismatch when repreparing on the fly
+* [CPP-815] - Schema agreement fails with SNI
+* [CPP-811] - Requests won't complete if they exceed the number of streams on a connection
+
+2.14.0-alpha
+===========
+
+Features
+--------
+* [CPP-787] DataStax cloud platform
+ * [CPP-788] Support SNI at connection level using `host_id` as host name
+ * [CPP-793] Add SNI support to `SocketConnector` and SSL backend
+ * [CPP-794] Add domain name resolution to `SocketConnector`
+ * [CPP-795] Replace `Address` with endpoint or host type on connection path
+ * [CPP-797] Events need to map from affected node address to `host_id`
+ * [CPP-800] Node discovery should use the `host_id` (and endpoint address) instead of the
+ node's rpc_address
+ * [CPP-790] Configuration API for DBaaS
+ * [CPP-791] Add creds.zip support for automatic configuration
+ * [CPP-798] Configure authentication and SSL from secure connection bundle configuration
+ * [CPP-799] Use metadata service to determine contact points
+ * [CPP-788] Support SNI at connection level using `host_id` as host name
+ * [CPP-803] Propagate `local_dc` from `CloudClusterMetadataResolver` to load balancing policies
+
+Bug Fixes
+--------
+* [CPP-786] Fix TLS 1.3 support
+* [CPP-806] Fix handling of no contact points
+
+Other
+--------
+* [CPP-796] Correct compiler flags for mixed C and C++ projects
+
+Community
+--------
+* [CPP-754] Broken build with GCC 9 (eevans)
+* Add openssl to the required library list in pkg_config file (accelerated)
+* Allow random to work with 0 (joeyhub)
+
2.13.0
===========
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3c0c828cf..7bc63bb77 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -43,7 +43,7 @@ option(CASS_USE_OPENSSL "Use OpenSSL" ON)
option(CASS_USE_STATIC_LIBS "Link static libraries when building executables" OFF)
option(CASS_USE_STD_ATOMIC "Use C++11 atomics library" OFF)
option(CASS_USE_TCMALLOC "Use tcmalloc" OFF)
-option(CASS_USE_ZLIB "Use zlib" OFF)
+option(CASS_USE_ZLIB "Use zlib" ON)
option(CASS_USE_TIMERFD "Use timerfd (Linux only)" ON)
# Handle testing dependencies
@@ -108,8 +108,6 @@ CassAddIncludes()
CassFindSourceFiles()
CassConfigure()
-set(TEST_CXX_FLAGS ${CASS_TEST_CXX_FLAGS})
-
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
@@ -177,9 +175,6 @@ CassConfigureTests()
# no need to update CMakeLists.txt!
if(CASS_BUILD_EXAMPLES)
- if(CASS_USE_STATIC_LIBS)
- set(CASS_EXAMPLE_C_FLAGS "${CASS_EXAMPLE_C_FLAGS} -DCASS_STATIC")
- endif()
CassBuildExamples("examples")
endif()
diff --git a/README.md b/README.md
index 798625418..d6d869255 100644
--- a/README.md
+++ b/README.md
@@ -51,6 +51,7 @@ provided with the distribution:
* [Reverse DNS] with SSL peer identity verification support
* Randomized contact points
* [Speculative execution]
+* Support for [DataStax Constellation] Cloud Data Platform
## Compatibility
@@ -78,7 +79,6 @@ __Disclaimer__: DataStax products do not support big-endian systems.
* JIRA: https://datastax-oss.atlassian.net/browse/CPP
* Mailing List: https://groups.google.com/a/lists.datastax.com/forum/#!forum/cpp-driver-user
-* DataStax Academy via Slack: https://academy.datastax.com/slack
## Feedback Requested
@@ -198,6 +198,7 @@ specific language governing permissions and limitations under the License.
[ubuntu-16-04-dependencies]: http://downloads.datastax.com/cpp-driver/ubuntu/16.04/dependencies
[ubuntu-18-04-dependencies]: http://downloads.datastax.com/cpp-driver/ubuntu/18.04/dependencies
[windows-dependencies]: http://downloads.datastax.com/cpp-driver/windows/dependencies
+[DataStax Constellation]: https://constellation.datastax.com
[Asynchronous API]: http://datastax.github.io/cpp-driver/topics/#futures
[Simple]: http://datastax.github.io/cpp-driver/topics/#executing-queries
diff --git a/appveyor.ps1 b/appveyor.ps1
index 24771dfda..fb732be8e 100644
--- a/appveyor.ps1
+++ b/appveyor.ps1
@@ -47,6 +47,18 @@ Function Perl-Version-Information {
}
}
+Function CMake-Version-Information {
+ If (Get-Command "cmake" -ErrorAction SilentlyContinue) {
+ $temporary_file = New-TemporaryFile
+ Start-Process -FilePath cmake -ArgumentList "--version" -RedirectStandardOutput $($temporary_file) -Wait -NoNewWindow
+ $output = Get-Content "$($temporary_file)" -Raw
+ Write-Host "$($output.Trim())" -BackgroundColor DarkBlue
+ Remove-Item $temporary_file
+ } Else {
+ Write-Host "CMake is not available" -BackgroundColor DarkRed
+ }
+}
+
Function Build-Configuration-Information {
$output = @"
Visual Studio: $($Env:CMAKE_GENERATOR.Split(" ")[-2]) [$($Env:CMAKE_GENERATOR.Split(" ")[-1])]
@@ -55,6 +67,7 @@ Boost: v$($Env:BOOST_VERSION)
libssh2: v$($Env:LIBSSH2_VERSION)
libuv: v$($Env:LIBUV_VERSION)
OpenSSL: v$(Get-OpenSSL-Version)
+zlib: v$($Env:ZLIB_VERSION)
Build Number: $($Env:APPVEYOR_BUILD_NUMBER)
Branch: $($Env:APPVEYOR_REPO_BRANCH)
SHA: $(Get-Commit-Sha)
@@ -104,17 +117,15 @@ Function Initialize-Build-Environment {
$libuv_version = $Env:LIBUV_VERSION
$openssl_version = Get-OpenSSL-Version
$Env:OPENSSL_VERSION = $openssl_version
+ $zlib_version = $Env:ZLIB_VERSION
$kerberos_version = "4.1"
$bison_version = "2.4.1"
$perl_version = "5.26.2.1"
# Determine the platform and create associate environment variables
- $architecture = "32"
- If ($Env:Platform -Like "x64") {
- $architecture = "64"
- }
- $lib_architecture = "lib$($architecture)"
- $windows_architecture = "win$($architecture)"
+ $Env:CMAKE_PLATFORM = $Env:Platform
+ $lib_architecture = "lib64"
+ $windows_architecture = "win64"
# Determine which header file to use for determine driver version
$driver_header_file = "cassandra.h"
@@ -149,13 +160,15 @@ Function Initialize-Build-Environment {
$Env:LIBUV_ROOT_DIR = "$($dependencies_location_prefix)/libuv-$($libuv_version)"
$Env:OPENSSL_BASE_DIR = "$($dependencies_location_prefix)/openssl-$($openssl_version)"
$Env:OPENSSL_ROOT_DIR = "$($Env:OPENSSL_BASE_DIR)/shared"
+ $Env:ZLIB_ROOT_DIR = "$($dependencies_location_prefix)/zlib-$($zlib_version)"
$Env:DRIVER_INSTALL_DIR = "C:/projects/driver/lib"
$Env:DRIVER_ARTIFACTS_DIR = "C:/projects/driver/artifacts"
$Env:DRIVER_ARTIFACTS_LOGS_DIR = "$($Env:DRIVER_ARTIFACTS_DIR)/logs"
# Generate the environment variables for the third party archives
- $Env:LIBUV_ARTIFACT_ARCHIVE = "libuv-$($libuv_version)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
- $Env:OPENSSL_ARTIFACT_ARCHIVE = "openssl-$($openssl_version)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
+ $Env:LIBUV_ARTIFACT_ARCHIVE = "libuv-$($libuv_version)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
+ $Env:OPENSSL_ARTIFACT_ARCHIVE = "openssl-$($openssl_version)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
+ $Env:ZLIB_ARTIFACT_ARCHIVE = "zlib-$($zlib_version)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
# Generate DataStax Enterprise specific environment variables
If ($Env:DRIVER_TYPE -Like "dse") {
@@ -185,15 +198,15 @@ Function Initialize-Build-Environment {
# Generate the archive name for the driver test and examples artifacts
$build_version = "$($Env:APPVEYOR_BUILD_NUMBER)-$($Env:APPVEYOR_REPO_BRANCH)"
# TODO: Re-enable OpenSSL version appending if multiple OpenSSL versions are enabled
- #$Env:DRIVER_ARTIFACT_EXAMPLES_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-examples-openssl-$($Env:OPENSSL_MAJOR_MINOR)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
- #$Env:DRIVER_ARTIFACT_TESTS_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-tests-openssl-$($Env:OPENSSL_MAJOR_MINOR)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
- $Env:DRIVER_ARTIFACT_EXAMPLES_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-examples-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
- $Env:DRIVER_ARTIFACT_TESTS_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-tests-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
+ #$Env:DRIVER_ARTIFACT_EXAMPLES_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-examples-openssl-$($Env:OPENSSL_MAJOR_MINOR)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
+ #$Env:DRIVER_ARTIFACT_TESTS_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-tests-openssl-$($Env:OPENSSL_MAJOR_MINOR)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
+ $Env:DRIVER_ARTIFACT_EXAMPLES_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-examples-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
+ $Env:DRIVER_ARTIFACT_TESTS_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-tests-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
# Generate the archive name for the driver packaging
# TODO: Re-enable OpenSSL version appending if multiple OpenSSL versions are enabled
- #$Env:DRIVER_ARTIFACT_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-openssl-$($Env:OPENSSL_MAJOR_MINOR)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
- $Env:DRIVER_ARTIFACT_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
+ #$Env:DRIVER_ARTIFACT_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-openssl-$($Env:OPENSSL_MAJOR_MINOR)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
+ $Env:DRIVER_ARTIFACT_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip"
# Generate additional download/install environments for third party build requirements
$Env:BISON_BINARIES_ARCHIVE = "bison-$($bison_version)-bin.zip"
@@ -304,8 +317,9 @@ Function Install-Driver-Environment {
}
}
- # Display the Perl version information
+ # Display the Perl and CMake version information
Perl-Version-Information
+ CMake-Version-Information
# Determine the location of the CMake modules (external projects)
$cmake_modules_dir = "$($Env:APPVEYOR_BUILD_FOLDER -Replace `"\\`", `"/`")/"
@@ -314,12 +328,6 @@ Function Install-Driver-Environment {
}
$cmake_modules_dir += "cmake/modules"
- # Determine the CMake generator to utilize
- $cmake_generator = $Env:CMAKE_GENERATOR
- If ($Env:Platform -Like "x64") {
- $cmake_generator += " Win64"
- }
-
# Build and install the dependencies (if needed; cached)
$dependencies_build_location_prefix = "C:/projects/dependencies/build/"
If (-Not (Test-Path -Path "$($Env:LIBUV_ROOT_DIR)/lib")) { # lib directory checked due to external project being CMake (automatically creates root directory)
@@ -342,7 +350,7 @@ add_dependencies(`${PROJECT_NAME} `${LIBUV_LIBRARY_NAME})
$cmakelists_contents | Out-File -FilePath "CMakeLists.txt" -Encoding Utf8 -Force
Write-Host "Configuring libuv"
- cmake -G "$($cmake_generator)" -DBUILD_SHARED_LIBS=On "-DLIBUV_VERSION=$($Env:LIBUV_VERSION)" "-DLIBUV_INSTALL_PREFIX=$($Env:LIBUV_ROOT_DIR)" .
+ cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM -DBUILD_SHARED_LIBS=On "-DLIBUV_VERSION=$($Env:LIBUV_VERSION)" "-DLIBUV_INSTALL_PREFIX=$($Env:LIBUV_ROOT_DIR)" .
If ($LastExitCode -ne 0) {
If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") {
Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "libuv Output Log"
@@ -395,7 +403,7 @@ add_dependencies(`${PROJECT_NAME} `${OPENSSL_LIBRARY_NAME})
if ("$_" -Like "shared") {
$shared_libs = "On"
}
- cmake -G "$($cmake_generator)" "-DBUILD_SHARED_LIBS=$($shared_libs)" "-DOPENSSL_VERSION=$($Env:OPENSSL_VERSION)" "-DOPENSSL_INSTALL_PREFIX=$($Env:OPENSSL_BASE_DIR)/$_" .
+ cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM "-DBUILD_SHARED_LIBS=$($shared_libs)" "-DOPENSSL_VERSION=$($Env:OPENSSL_VERSION)" "-DOPENSSL_INSTALL_PREFIX=$($Env:OPENSSL_BASE_DIR)/$_" .
If ($LastExitCode -ne 0) {
If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") {
Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "OpenSSL Output Log"
@@ -423,6 +431,53 @@ add_dependencies(`${PROJECT_NAME} `${OPENSSL_LIBRARY_NAME})
}
}
+ If (-Not (Test-Path -Path "$($Env:ZLIB_ROOT_DIR)/lib")) {
+ New-Item -ItemType Directory -Force -Path "$($dependencies_build_location_prefix)/zlib" | Out-Null
+ Push-Location -Path "$($dependencies_build_location_prefix)/zlib"
+
+ $cmakelists_contents = @"
+cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR)
+project(zlib)
+set(PROJECT_DISPLAY_NAME "AppVeyor CI Build for zlib")
+set(PROJECT_MODULE_DIR $cmake_modules_dir)
+set(CMAKE_MODULE_PATH `${CMAKE_MODULE_PATH} `${PROJECT_MODULE_DIR})
+include(ExternalProject-zlib)
+set(GENERATED_SOURCE_FILE `${CMAKE_CURRENT_BINARY_DIR}/main.cpp)
+file(REMOVE `${GENERATED_SOURCE_FILE})
+file(WRITE `${GENERATED_SOURCE_FILE} "int main () { return 0; }")
+add_executable(`${PROJECT_NAME} `${GENERATED_SOURCE_FILE})
+add_dependencies(`${PROJECT_NAME} `${ZLIB_LIBRARY_NAME})
+"@
+ $cmakelists_contents | Out-File -FilePath "CMakeLists.txt" -Encoding Utf8 -Force
+
+ Write-Host "Configuring zlib"
+ cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM -DBUILD_SHARED_LIBS=On "-DZLIB_VERSION=$($Env:ZLIB_VERSION)" "-DZLIB_INSTALL_PREFIX=$($Env:ZLIB_ROOT_DIR)" .
+ If ($LastExitCode -ne 0) {
+ If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") {
+ Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "zlib Output Log"
+ }
+ If (Test-Path -Path "build/CMakeFiles/CMakeError.log") {
+ Push-AppveyorArtifact "build/CMakeFiles/CMakeError.log" -DeploymentName "zlib Error Log"
+ }
+ Pop-Location
+ Throw "Failed to configure zlib for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)"
+ }
+ Write-Host "Building and Installing zlib"
+ cmake --build . --config RelWithDebInfo
+ If ($LastExitCode -ne 0) {
+ If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") {
+ Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "zlib Output Log"
+ }
+ If (Test-Path -Path "build/CMakeFiles/CMakeError.log") {
+ Push-AppveyorArtifact "build/CMakeFiles/CMakeError.log" -DeploymentName "zlib Error Log"
+ }
+ Pop-Location
+ Throw "Failed to build zlib for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)"
+ }
+
+ Pop-Location
+ }
+
# Handle installation of DataStax Enterprise dependencies
If ($Env:DRIVER_TYPE -Like "dse") {
# Determine if Kerberos for Windows should be installed (cached)
@@ -476,7 +531,7 @@ add_dependencies(`${PROJECT_NAME} `${BOOST_LIBRARY_NAME})
$cmakelists_contents | Out-File -FilePath "CMakeLists.txt" -Encoding Utf8 -Force
Write-Host "Configuring Boost"
- cmake -G "$($cmake_generator)" "-DBOOST_VERSION=$($Env:BOOST_VERSION)" "-DBOOST_INSTALL_PREFIX=$($Env:BOOST_ROOT)" .
+ cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM "-DBOOST_VERSION=$($Env:BOOST_VERSION)" "-DBOOST_INSTALL_PREFIX=$($Env:BOOST_ROOT)" .
If ($LastExitCode -ne 0) {
If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") {
Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "Boost Output Log"
@@ -524,7 +579,7 @@ add_dependencies(`${PROJECT_NAME} `${LIBSSH2_LIBRARY_NAME})
$cmakelists_contents | Out-File -FilePath "CMakeLists.txt" -Encoding Utf8 -Force
Write-Host "Configuring libssh2"
- cmake -G "$($cmake_generator)" "-DLIBSSH2_VERSION=$($Env:LIBSSH2_VERSION)" "-DLIBSSH2_INSTALL_PREFIX=$($Env:LIBSSH2_ROOT_DIR)" .
+ cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM "-DLIBSSH2_VERSION=$($Env:LIBSSH2_VERSION)" "-DLIBSSH2_INSTALL_PREFIX=$($Env:LIBSSH2_ROOT_DIR)" .
If ($LastExitCode -ne 0) {
If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") {
Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "libssh2 Output Log"
@@ -562,16 +617,9 @@ add_dependencies(`${PROJECT_NAME} `${LIBSSH2_LIBRARY_NAME})
}
Function Build-Driver {
- # Determine the CMake generator to utilize
- $cmake_generator = $Env:CMAKE_GENERATOR
- If ($Env:Platform -Like "x64") {
- $cmake_generator += " Win64"
- }
-
# Ensure Boost atomic is used for Visual Studio 2010 (increased performance)
$use_boost_atomic = "Off"
- If ($Env:VISUAL_STUDIO_INTERNAL_VERSION -Like "100" -Or
- ($Env:VISUAL_STUDIO_INTERNAL_VERSION -Like "110" -And $Env:Platform -Like "x86")) {
+ If ($Env:VISUAL_STUDIO_INTERNAL_VERSION -Like "100") {
$use_boost_atomic = "On" # Enable Boost atomic usage
}
@@ -583,7 +631,7 @@ Function Build-Driver {
New-Item -ItemType Directory -Force -Path "$($Env:APPVEYOR_BUILD_FOLDER)/build"
Push-Location "$($Env:APPVEYOR_BUILD_FOLDER)/build"
Write-Host "Configuring DataStax C/C++ $($driver_type) Driver"
- cmake -G "$($cmake_generator)" "-D$($Env:DRIVER_TYPE)_MULTICORE_COMPILATION=On" "-D$($Env:DRIVER_TYPE)_USE_OPENSSL=On" "-D$($Env:DRIVER_TYPE)_USE_BOOST_ATOMIC=$($use_boost_atomic)" "-D$($Env:DRIVER_TYPE)_BUILD_EXAMPLES=On" "-D$($Env:DRIVER_TYPE)_BUILD_TESTS=On" "-D$($Env:DRIVER_TYPE)_USE_LIBSSH2=On" "-DCMAKE_INSTALL_PREFIX=`"$($Env:DRIVER_INSTALL_DIR)`"" ..
+ cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM "-D$($Env:DRIVER_TYPE)_MULTICORE_COMPILATION=On" "-D$($Env:DRIVER_TYPE)_USE_OPENSSL=On" "-D$($Env:DRIVER_TYPE)_USE_ZLIB=On" "-D$($Env:DRIVER_TYPE)_USE_BOOST_ATOMIC=$($use_boost_atomic)" "-D$($Env:DRIVER_TYPE)_BUILD_EXAMPLES=On" "-D$($Env:DRIVER_TYPE)_BUILD_TESTS=On" "-D$($Env:DRIVER_TYPE)_USE_LIBSSH2=On" "-DCMAKE_INSTALL_PREFIX=`"$($Env:DRIVER_INSTALL_DIR)`"" ..
If ($LastExitCode -ne 0) {
Pop-Location
Throw "Failed to configure DataStax C/C++ $($driver_type) Driver for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)"
@@ -707,6 +755,17 @@ a -tzip "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:OPENSSL_ARTIFACT_ARCHIVE)" -r "$($E
If ($process.ExitCode -ne 0) {
Throw "Failed to archive OpenSSL for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)"
}
+
+ # Clean up the library dependency directories for zlib packaging
+ New-Item -ItemType Directory -Force -Path "$($Env:DRIVER_ARTIFACTS_DIR)/zlib" | Out-Null
+ Copy-Item -Force -Recurse -Path "$($Env:ZLIB_ROOT_DIR)/*" "$($Env:DRIVER_ARTIFACTS_DIR)/zlib" | Out-Null
+ $argument_list = @"
+a -tzip "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:ZLIB_ARTIFACT_ARCHIVE)" -r "$($Env:DRIVER_ARTIFACTS_DIR)/zlib/*"
+"@
+ $process = Start-Process -FilePath 7z -ArgumentList $argument_list -PassThru -Wait -NoNewWindow
+ If ($process.ExitCode -ne 0) {
+ Throw "Failed to archive zlib for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)"
+ }
}
Function Push-Artifacts {
@@ -721,6 +780,7 @@ Function Push-Artifacts {
Push-AppveyorArtifact "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:DRIVER_ARTIFACT_TESTS_ARCHIVE)" -DeploymentName "DataStax C/C++ $($driver_type) Driver Tests"
Push-AppveyorArtifact "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:LIBUV_ARTIFACT_ARCHIVE)" -DeploymentName "libuv v$($Env:LIBUV_VERSION)"
Push-AppveyorArtifact "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:OPENSSL_ARTIFACT_ARCHIVE)" -DeploymentName "OpenSSL v$($Env:OPENSSL_VERSION)"
+ Push-AppveyorArtifact "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:ZLIB_ARTIFACT_ARCHIVE)" -DeploymentName "zlib v$($Env:ZLIB_VERSION)"
}
}
@@ -781,6 +841,8 @@ Function Publish-Artifacts {
#TODO: Need to handle OpenSSL v1.1.x if enabled
$openssl_uri = "$($base_uri)/dependencies/openssl/v$($Env:OPENSSL_VERSION)/$($Env:OPENSSL_ARTIFACT_ARCHIVE)"
$openssl_archive = "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:OPENSSL_ARTIFACT_ARCHIVE)"
+ $zlib_uri = "$($base_uri)/dependencies/zlib/v$($Env:ZLIB_VERSION)/$($Env:ZLIB_ARTIFACT_ARCHIVE)"
+ $zlib_archive = "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:ZLIB_ARTIFACT_ARCHIVE)"
# Publish/Upload the driver and it dependencies to Artifactory
$is_failure = $False
@@ -798,6 +860,10 @@ Function Publish-Artifacts {
$is_failure = $True
$failed_upload += "OpenSSL"
}
+ If ((Publish-Artifact-To-Artifactory -Uri "$($zlib_uri)" -FilePath "$($zlib_archive)") -ne 0) {
+ $is_failure = $True
+ $failed_upload += "zlib"
+ }
# Check to see if there was a failure uploading the artifacts
If ($is_failure) {
diff --git a/appveyor.yml b/appveyor.yml
index 71df5120e..7c69713b0 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -25,7 +25,6 @@ cache:
- C:\projects\dependencies\bin -> appveyor.ps1
- C:\projects\dependencies\libs -> appveyor.yml
platform:
- - x86
- x64
hosts:
cpp-driver.hostname.: 127.254.254.254
@@ -36,14 +35,14 @@ environment:
secure: PLLc0JCL9I7y8zw8p9meQhxXGAbyWCjyWO17xKOsyxE=
ARTIFACTORY_PASSWORD:
secure: h28bN22Py3CZPqrWoZWEjIFnpes+kslusCKP1mRYdUqBEf+OO1kFEQTZ9DGD7tuCSIIRDI3Mf9LX8zgUdmdlZA==
- APPVEYOR_BUILD_WORKER_CLOUD: gce
APPVEYOR_IGNORE_COMMIT_FILTERING_ON_TAG: true
DRIVER_TYPE: CASS
BOOST_VERSION: 1.69.0
LIBSSH2_VERSION: 1.9.0
- LIBUV_VERSION: 1.29.1
+ LIBUV_VERSION: 1.33.0
OPENSSL_1_0_VERSION: 1.0.2s
OPENSSL_1_1_VERSION: 1.1.1c
+ ZLIB_VERSION: 1.2.11
matrix:
- CMAKE_GENERATOR: Visual Studio 10 2010
OPENSSL_MAJOR_MINOR: 1.0
@@ -85,6 +84,16 @@ environment:
# OPENSSL_MAJOR_MINOR: 1.1
# VISUAL_STUDIO_INTERNAL_VERSION: 141
# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
+ - CMAKE_GENERATOR: Visual Studio 16 2019
+ OPENSSL_MAJOR_MINOR: 1.0
+ VISUAL_STUDIO_INTERNAL_VERSION: 142
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
+ BOOST_VERSION: 1.70.0
+# - CMAKE_GENERATOR: Visual Studio 16 2019
+# OPENSSL_MAJOR_MINOR: 1.1
+# VISUAL_STUDIO_INTERNAL_VERSION: 142
+# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
+# BOOST_VERSION: 1.70.0
#init:
# - ps: iex ((New-Object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
install:
diff --git a/build.yaml b/build.yaml
index 4d3cce80e..699f87268 100644
--- a/build.yaml
+++ b/build.yaml
@@ -9,6 +9,20 @@ schedules:
slack: cpp-driver-dev-bots
branches:
include: ["/CPP-\\d+/", "master"]
+ env_vars: |
+ CI_SCHEDULE=commit
+ nightly:
+ schedule: nightly
+ notify:
+ slack: cpp-driver-dev-bots
+ branches:
+ include: ["/CPP-\\d+/", "master"]
+ matrix:
+ exclude:
+ - os: ['ubuntu/trusty64/cpp', 'ubuntu/xenial64/cpp', 'centos/6-64/cpp', 'centos/7-64/cpp', 'osx/high-sierra']
+ env_vars: |
+ CI_SCHEDULE=nightly
+ CI_INTEGRATION_ENABLED=true
architecture:
- x64
os:
@@ -19,7 +33,7 @@ os:
- centos/7-64/cpp
- osx/high-sierra
env:
- LIBUV_VERSION: 1.29.1
+ LIBUV_VERSION: 1.33.0
build:
- script: |
. .build.sh
@@ -33,11 +47,15 @@ build:
build/cassandra-unit-tests --gtest_output=xml:cassandra-unit-test-results.xml
+ if [ -f build/cassandra-integration-tests ]; then
+ build/cassandra-integration-tests --category=cassandra --keep-clusters --verbose --gtest_filter=DbaasTests* --gtest_output=xml:dbaas-integration-test-results.xml
+ fi
+
install_driver
test_installed_driver 'cassandra'
- xunit:
- - "*unit-test-results.xml"
+ - "*test-results.xml"
package:
allow_empty: true
include: # list of files and glob paths to include in the artifact, relative to the current working directory
@@ -46,6 +64,8 @@ release:
matrix:
exclude:
- os: [ osx/high-sierra ]
+ env_vars: |
+ CI_SCHEDULE=release
after:
each:
- script: |
diff --git a/cmake/modules/CppDriver.cmake b/cmake/modules/CppDriver.cmake
index 61f16470a..9ad759a55 100644
--- a/cmake/modules/CppDriver.cmake
+++ b/cmake/modules/CppDriver.cmake
@@ -138,21 +138,32 @@ endmacro()
# Arguments:
# prefix - prefix of global variable names that contain specific
# info on building the library (e.g. CASS or DSE).
-# Input: PROJECT_LIB_NAME, PROJECT_VERSION_STRING, PROJECT_VERSION_MAJOR,
-# PROJECT_CXX_LINKER_FLAGS, *_DRIVER_CXX_FLAGS
-# Output: CASS_INCLUDES and CASS_LIBS
+# Input: PROJECT_LIB_NAME, PROJECT_VERSION_STRING, PROJECT_VERSION_MAJOR
#------------------------
macro(CassConfigureShared prefix)
target_link_libraries(${PROJECT_LIB_NAME} ${${prefix}_LIBS})
set_target_properties(${PROJECT_LIB_NAME} PROPERTIES OUTPUT_NAME ${PROJECT_LIB_NAME})
set_target_properties(${PROJECT_LIB_NAME} PROPERTIES VERSION ${PROJECT_VERSION_STRING} SOVERSION ${PROJECT_VERSION_MAJOR})
- set_target_properties(${PROJECT_LIB_NAME} PROPERTIES LINK_FLAGS "${PROJECT_CXX_LINKER_FLAGS}")
set_target_properties(${PROJECT_LIB_NAME} PROPERTIES
COMPILE_PDB_NAME "${PROJECT_LIB_NAME}"
COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
- set_property(
- TARGET ${PROJECT_LIB_NAME}
- APPEND PROPERTY COMPILE_FLAGS "${${prefix}_DRIVER_CXX_FLAGS} -DCASS_BUILDING")
+ set(STATIC_COMPILE_FLAGS "-D${prefix}_BUILDING")
+ if("${prefix}" STREQUAL "DSE")
+ set(STATIC_COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -DCASS_BUILDING")
+ endif()
+ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ set_property(
+ TARGET ${PROJECT_LIB_NAME}
+ APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -Wconversion -Wno-sign-conversion -Wno-shorten-64-to-32 -Wno-undefined-var-template -Werror")
+ elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") # Too many superfluous warnings generated with GCC when using -Wconversion (see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=40752)
+ set_property(
+ TARGET ${PROJECT_LIB_NAME}
+ APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -Werror")
+ elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+ set_property(
+ TARGET ${PROJECT_LIB_NAME}
+ APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} /we4800")
+ endif()
endmacro()
#------------------------
@@ -164,20 +175,38 @@ endmacro()
# prefix - prefix of global variable names that contain specific
# info on building the library (e.g. CASS or DSE).
# Input: PROJECT_LIB_NAME_STATIC, PROJECT_VERSION_STRING, PROJECT_VERSION_MAJOR,
-# PROJECT_CXX_LINKER_FLAGS, *_DRIVER_CXX_FLAGS
-# Output: CASS_INCLUDES and CASS_LIBS
+# *_USE_STATIC_LIBS
#------------------------
macro(CassConfigureStatic prefix)
target_link_libraries(${PROJECT_LIB_NAME_STATIC} ${${prefix}_LIBS})
set_target_properties(${PROJECT_LIB_NAME_STATIC} PROPERTIES OUTPUT_NAME ${PROJECT_LIB_NAME_STATIC})
set_target_properties(${PROJECT_LIB_NAME_STATIC} PROPERTIES VERSION ${PROJECT_VERSION_STRING} SOVERSION ${PROJECT_VERSION_MAJOR})
- set_target_properties(${PROJECT_LIB_NAME_STATIC} PROPERTIES LINK_FLAGS "${PROJECT_CXX_LINKER_FLAGS}")
set_target_properties(${PROJECT_LIB_NAME_STATIC} PROPERTIES
COMPILE_PDB_NAME "${PROJECT_LIB_NAME_STATIC}"
COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
- set_property(
- TARGET ${PROJECT_LIB_NAME_STATIC}
- APPEND PROPERTY COMPILE_FLAGS "${${prefix}_DRIVER_CXX_FLAGS} -DCASS_STATIC")
+ set(STATIC_COMPILE_FLAGS "-D${prefix}_STATIC")
+ if("${prefix}" STREQUAL "DSE")
+ set(STATIC_COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -DCASS_STATIC")
+ endif()
+ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+ set_property(
+ TARGET ${PROJECT_LIB_NAME_STATIC}
+ APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -Wconversion -Wno-sign-conversion -Wno-shorten-64-to-32 -Wno-undefined-var-template -Werror")
+ elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") # Too many superfluous warnings generated with GCC when using -Wconversion (see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=40752)
+ set_property(
+ TARGET ${PROJECT_LIB_NAME_STATIC}
+ APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -Werror")
+ elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+ set_property(
+ TARGET ${PROJECT_LIB_NAME_STATIC}
+ APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} /we4800")
+ endif()
+
+ # Update the CXX flags to indicate the use of the static library
+ if(${prefix}_USE_STATIC_LIBS)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${STATIC_COMPILE_FLAGS}")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${STATIC_COMPILE_FLAGS}")
+ endif()
endmacro()
#------------------------
@@ -444,6 +473,48 @@ macro(CassRapidJson)
include_directories(${RAPID_JSON_INCLUDE_DIR})
endmacro()
+#------------------------
+# CassMiniZip
+#
+# Set some MINIZIP_* variables, set up some source_group's, and add the
+# MINIZIP include dir to our list of include dirs.
+#
+# Input: CASS_SRC_DIR
+# Output: MINIZIP_INCLUDE_DIR, MINIZIP_HEADER_FILES, MINIZIP_SOURCE_FILES
+#------------------------
+macro(CassMiniZip)
+ if (ZLIB_FOUND)
+ set(MINIZIP_INCLUDE_DIR "${CASS_SRC_DIR}/third_party/minizip")
+ set(MINIZIP_HEADER_FILES ${MINIZIP_INCLUDE_DIR}/crypt.h
+ ${MINIZIP_INCLUDE_DIR}/ioapi.h
+ ${MINIZIP_INCLUDE_DIR}/unzip.h)
+ set(MINIZIP_SOURCE_FILES ${MINIZIP_INCLUDE_DIR}/ioapi.c
+ ${MINIZIP_INCLUDE_DIR}/unzip.c)
+ source_group("Header Files\\minizip" FILES ${MINIZIP_HEADER_FILES})
+ source_group("Source Files\\minizip" FILES ${MINIZIP_SOURCE_FILES})
+ include_directories(${MINIZIP_INCLUDE_DIR})
+ endif()
+endmacro()
+
+#------------------------
+# CassHttpParser
+#
+# Set some HTTP_PARSER_* variables, set up some source_group's, and add the
+# HTTP_PARSER include dir to our list of include dirs.
+#
+# Input: CASS_SRC_DIR
+# Output: HTTP_PARSER_INCLUDE_DIR, HTTP_PARSER_HEADER_FILES,
+# HTTP_PARSER_SOURCE_FILES
+#------------------------
+macro(CassHttpParser)
+ set(HTTP_PARSER_INCLUDE_DIR "${CASS_SRC_DIR}/third_party/http-parser")
+ set(HTTP_PARSER_HEADER_FILES ${HTTP_PARSER_INCLUDE_DIR}/http_parser.h)
+ set(HTTP_PARSER_SOURCE_FILES ${HTTP_PARSER_INCLUDE_DIR}/http_parser.c)
+ source_group("Header Files\\http-parser" FILES ${HTTP_PARSER_HEADER_FILES})
+ source_group("Source Files\\http-parser" FILES ${HTTP_PARSER_SOURCE_FILES})
+ include_directories(${HTTP_PARSER_INCLUDE_DIR})
+endmacro()
+
#------------------------
# CassSimulacron
#
@@ -672,6 +743,7 @@ macro(CassUseZlib)
# Assign zlib properties
set(CASS_INCLUDES ${CASS_INCLUDES} ${ZLIB_INCLUDE_DIRS})
set(CASS_LIBS ${CASS_LIBS} ${ZLIB_LIBRARIES})
+ set(HAVE_ZLIB On)
else()
message(WARNING "Could not find zlib, try to set the path to zlib root folder in the system variable ZLIB_ROOT_DIR")
message(WARNING "zlib libraries will not be linked into build")
@@ -694,8 +766,6 @@ endmacro()
#
# Input: CASS_USE_STD_ATOMIC, CASS_USE_BOOST_ATOMIC, CASS_MULTICORE_COMPILATION
# CASS_USE_STATIC_LIBS
-# Output: CASS_USE_STD_ATOMIC, CASS_DRIVER_CXX_FLAGS, CASS_TEST_CXX_FLAGS,
-# CASS_EXAMPLE_C_FLAGS
#------------------------
macro(CassSetCompilerFlags)
# Force OLD style of implicitly dereferencing variables
@@ -818,7 +888,6 @@ macro(CassSetCompilerFlags)
# TODO(mpenick): Fix these "possible loss of data" warnings
add_definitions(/wd4244)
add_definitions(/wd4267)
- add_definitions(/wd4800) # Performance warning due to automatic compiler casting from int to bool
# Add preprocessor definitions for proper compilation
add_definitions(-D_CRT_SECURE_NO_WARNINGS) # Remove warnings for not using safe functions (TODO: Fix codebase to be more secure for Visual Studio)
@@ -826,10 +895,8 @@ macro(CassSetCompilerFlags)
add_definitions(-D_SILENCE_TR1_NAMESPACE_DEPRECATION_WARNING) # Remove warnings for TR1 deprecation (Visual Studio 15 2017); caused by sparsehash
# Create the project, example, and test flags
- set(CASS_DRIVER_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CASS_DRIVER_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}")
- set(CASS_EXAMPLE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}")
- # Enable bigobj for large object files during compilation (Cassandra types integration test)
- set(CASS_TEST_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} ${WARNING_COMPILER_FLAGS} /bigobj")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}")
# Assign additional library requirements for Windows
set(CASS_LIBS ${CASS_LIBS} iphlpapi psapi wsock32 crypt32 ws2_32 userenv)
@@ -846,19 +913,16 @@ macro(CassSetCompilerFlags)
# OpenSSL is deprecated on later versions of Mac OS X. The long-term solution
# is to provide a CommonCryto implementation.
if (APPLE AND CASS_USE_OPENSSL)
- set(CASS_DRIVER_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} -Wno-deprecated-declarations")
- set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} -Wno-deprecated-declarations")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations")
endif()
# Enable C++11 support to use std::atomic
if(CASS_USE_STD_ATOMIC)
- set(CASS_DRIVER_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} -std=c++11")
- set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} -std=c++11")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
endif()
- set(CASS_DRIVER_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CASS_DRIVER_CXX_FLAGS} ${WARNING_COMPILER_FLAGS} -Werror")
- set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}")
- set(CASS_EXAMPLE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}")
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
# Clang/Intel specific compiler options
# I disabled long-long warning because boost generates about 50 such warnings
@@ -869,19 +933,16 @@ macro(CassSetCompilerFlags)
# OpenSSL is deprecated on later versions of Mac OS X. The long-term solution
# is to provide a CommonCryto implementation.
if (APPLE AND CASS_USE_OPENSSL)
- set(CASS_DRIVER_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} -Wno-deprecated-declarations")
- set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} -Wno-deprecated-declarations")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations")
endif()
# Enable C++11 support to use std::atomic
if(CASS_USE_STD_ATOMIC)
- set(CASS_DRIVER_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} -std=c++11")
- set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} -std=c++11")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
endif()
- set(CASS_DRIVER_CXX_FLAGS " ${CMAKE_CXX_FLAGS} ${CASS_DRIVER_CXX_FLAGS} ${WARNING_COMPILER_FLAGS} -Werror")
- set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}")
- set(CASS_EXAMPLE_C_FLAGS "${CMAKE_C_FLAGS} -std=c89 ${WARNING_COMPILER_FLAGS}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}")
else()
message(FATAL_ERROR "Unsupported compiler: ${CMAKE_CXX_COMPILER_ID}")
endif()
@@ -981,6 +1042,14 @@ macro(CassFindSourceFiles)
${CASS_SRC_DIR}/ssl/ssl_no_impl.cpp)
endif()
+ CassMiniZip()
+ set(CASS_INC_FILES ${CASS_INC_FILES} ${MINIZIP_HEADER_FILES})
+ set(CASS_SRC_FILES ${CASS_SRC_FILES} ${MINIZIP_SOURCE_FILES})
+
+ CassHttpParser()
+ set(CASS_INC_FILES ${CASS_INC_FILES} ${HTTP_PARSER_HEADER_FILES})
+ set(CASS_SRC_FILES ${CASS_SRC_FILES} ${HTTP_PARSER_SOURCE_FILES})
+
set(CASS_ALL_SOURCE_FILES ${CASS_SRC_FILES} ${CASS_API_HEADER_FILES} ${CASS_INC_FILES})
endmacro()
@@ -1001,6 +1070,7 @@ macro(CassConfigure)
else()
check_symbol_exists(arc4random_buf "stdlib.h" HAVE_ARC4RANDOM)
endif()
+
# Determine if sigpipe is available
check_symbol_exists(SO_NOSIGPIPE "sys/socket.h;sys/types.h" HAVE_NOSIGPIPE)
check_symbol_exists(sigtimedwait "signal.h" HAVE_SIGTIMEDWAIT)
@@ -1008,7 +1078,6 @@ macro(CassConfigure)
message(WARNING "Unable to handle SIGPIPE on your platform")
endif()
-
# Determine if hash is in the tr1 namespace
string(REPLACE "::" ";" HASH_NAMESPACE_LIST ${HASH_NAMESPACE})
foreach(NAMESPACE ${HASH_NAMESPACE_LIST})
@@ -1018,7 +1087,7 @@ macro(CassConfigure)
endforeach()
# Check for GCC compiler builtins
- if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ if(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
check_cxx_source_compiles("int main() { return __builtin_bswap32(42); }" HAVE_BUILTIN_BSWAP32)
check_cxx_source_compiles("int main() { return __builtin_bswap64(42); }" HAVE_BUILTIN_BSWAP64)
endif()
diff --git a/cmake/modules/ExternalProject-libuv.cmake b/cmake/modules/ExternalProject-libuv.cmake
index 1d7f4ed3b..d008138e4 100644
--- a/cmake/modules/ExternalProject-libuv.cmake
+++ b/cmake/modules/ExternalProject-libuv.cmake
@@ -22,7 +22,7 @@ if(NOT LIBUV_INSTALL_PREFIX)
endif()
option(LIBUV_VERSION "libuv version to build and install")
if(NOT LIBUV_VERSION)
- set(LIBUV_VERSION "1.29.1")
+ set(LIBUV_VERSION "1.32.0")
endif()
set(LIBUV_VERSION ${LIBUV_VERSION} CACHE STRING "libuv version to build and install" FORCE)
diff --git a/docs.yaml b/docs.yaml
index bf151fdf4..4508fbfbb 100644
--- a/docs.yaml
+++ b/docs.yaml
@@ -53,10 +53,13 @@ rewrites:
- http://www.datastax.com/documentation/cql/3.1: https://docs.datastax.com/en/archived/cql/3.1
- http://www.datastax.com/documentation/cassandra/2.: https://docs.datastax.com/en/archived/cassandra/2.
- http://downloads.datastax.com/cpp-driver: https://downloads.datastax.com/cpp-driver/
+ - http://www.datastax.com/dev/blog/datastax-c-driver(.*)$: https://www.datastax.com/blog/
rules:
use_path_nav_files_only: true
versions:
+ - name: "2.14"
+ ref: 2.14.0
- name: "2.13"
ref: 2.13.0
- name: "2.12"
diff --git a/driver_config.hpp.in b/driver_config.hpp.in
index 43d8a6df2..979f783f2 100644
--- a/driver_config.hpp.in
+++ b/driver_config.hpp.in
@@ -12,5 +12,6 @@
#cmakedefine HAVE_ARC4RANDOM
#cmakedefine HAVE_GETRANDOM
#cmakedefine HAVE_TIMERFD
+#cmakedefine HAVE_ZLIB
#endif
diff --git a/examples/async/CMakeLists.txt b/examples/async/CMakeLists.txt
index 83dd9a540..c14971284 100644
--- a/examples/async/CMakeLists.txt
+++ b/examples/async/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/auth/CMakeLists.txt b/examples/auth/CMakeLists.txt
index 75ffeac9a..2a4071484 100644
--- a/examples/auth/CMakeLists.txt
+++ b/examples/auth/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/basic/CMakeLists.txt b/examples/basic/CMakeLists.txt
index fe54ecce5..57123cee3 100644
--- a/examples/basic/CMakeLists.txt
+++ b/examples/basic/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/batch/CMakeLists.txt b/examples/batch/CMakeLists.txt
index 496b6a168..5da99ca24 100644
--- a/examples/batch/CMakeLists.txt
+++ b/examples/batch/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/bind_by_name/CMakeLists.txt b/examples/bind_by_name/CMakeLists.txt
index d89178243..de3ef6dbd 100644
--- a/examples/bind_by_name/CMakeLists.txt
+++ b/examples/bind_by_name/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/callbacks/CMakeLists.txt b/examples/callbacks/CMakeLists.txt
index 4c17e89fd..e91448ccc 100644
--- a/examples/callbacks/CMakeLists.txt
+++ b/examples/callbacks/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/cloud/.gitignore b/examples/cloud/.gitignore
new file mode 100644
index 000000000..c3de202fd
--- /dev/null
+++ b/examples/cloud/.gitignore
@@ -0,0 +1 @@
+cloud
diff --git a/examples/cloud/CMakeLists.txt b/examples/cloud/CMakeLists.txt
new file mode 100644
index 000000000..731fdac05
--- /dev/null
+++ b/examples/cloud/CMakeLists.txt
@@ -0,0 +1,12 @@
+cmake_minimum_required(VERSION 2.6.4)
+
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ".")
+set(PROJECT_EXAMPLE_NAME cloud)
+
+file(GLOB EXAMPLE_SRC_FILES ${CASS_ROOT_DIR}/examples/cloud/*.c)
+include_directories(${INCLUDES})
+add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
+target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
+add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
+
+set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/cloud/cloud.c b/examples/cloud/cloud.c
new file mode 100644
index 000000000..c8b4a7266
--- /dev/null
+++ b/examples/cloud/cloud.c
@@ -0,0 +1,109 @@
+/*
+ This is free and unencumbered software released into the public domain.
+
+ Anyone is free to copy, modify, publish, use, compile, sell, or
+ distribute this software, either in source code form or as a compiled
+ binary, for any purpose, commercial or non-commercial, and by any
+ means.
+
+ In jurisdictions that recognize copyright laws, the author or authors
+ of this software dedicate any and all copyright interest in the
+ software to the public domain. We make this dedication for the benefit
+ of the public at large and to the detriment of our heirs and
+ successors. We intend this dedication to be an overt act of
+ relinquishment in perpetuity of all present and future rights to this
+ software under copyright law.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ For more information, please refer to <http://unlicense.org/>
+*/
+
+#include <cassandra.h>
+#include <stdio.h>
+
+int main(int argc, char* argv[]) {
+ /* Setup and connect to cluster */
+ CassFuture* connect_future = NULL;
+ CassCluster* cluster;
+ CassSession* session;
+
+ const char* secure_connect_bundle;
+ const char* username;
+ const char* password;
+
+ if (argc < 4) {
+ fprintf(stderr, "Usage: %s <secure_connect_bundle> <username> <password>\n", argv[0]);
+ return 1;
+ }
+
+ secure_connect_bundle = argv[1];
+ username = argv[2];
+ password = argv[3];
+
+ cluster = cass_cluster_new();
+ session = cass_session_new();
+
+ /* Setup driver to connect to the cloud using the secure connection bundle */
+ if (cass_cluster_set_cloud_secure_connection_bundle(cluster, secure_connect_bundle) != CASS_OK) {
+ fprintf(stderr, "Unable to configure cloud using the secure connection bundle: %s\n",
+ secure_connect_bundle);
+ }
+
+ cass_cluster_set_credentials(cluster, username, password);
+
+ /* Provide the cluster object as configuration to connect the session */
+ connect_future = cass_session_connect(session, cluster);
+
+ if (cass_future_error_code(connect_future) == CASS_OK) {
+ /* Build statement and execute query */
+ const char* query = "SELECT release_version FROM system.local";
+ CassStatement* statement = cass_statement_new(query, 0);
+
+ CassFuture* result_future = cass_session_execute(session, statement);
+
+ if (cass_future_error_code(result_future) == CASS_OK) {
+ /* Retrieve result set and get the first row */
+ const CassResult* result = cass_future_get_result(result_future);
+ const CassRow* row = cass_result_first_row(result);
+
+ if (row) {
+ const CassValue* value = cass_row_get_column_by_name(row, "release_version");
+
+ const char* release_version;
+ size_t release_version_length;
+ cass_value_get_string(value, &release_version, &release_version_length);
+ printf("release_version: '%.*s'\n", (int)release_version_length, release_version);
+ }
+
+ cass_result_free(result);
+ } else {
+ /* Handle error */
+ const char* message;
+ size_t message_length;
+ cass_future_error_message(result_future, &message, &message_length);
+ fprintf(stderr, "Unable to run query: '%.*s'\n", (int)message_length, message);
+ }
+
+ cass_statement_free(statement);
+ cass_future_free(result_future);
+ } else {
+ /* Handle error */
+ const char* message;
+ size_t message_length;
+ cass_future_error_message(connect_future, &message, &message_length);
+ fprintf(stderr, "Unable to connect: '%.*s'\n", (int)message_length, message);
+ }
+
+ cass_future_free(connect_future);
+ cass_cluster_free(cluster);
+ cass_session_free(session);
+
+ return 0;
+}
diff --git a/examples/collections/CMakeLists.txt b/examples/collections/CMakeLists.txt
index c3f1c8e8a..c014b9dd3 100644
--- a/examples/collections/CMakeLists.txt
+++ b/examples/collections/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/concurrent_executions/CMakeLists.txt b/examples/concurrent_executions/CMakeLists.txt
index dd0afdebd..5f9a55326 100644
--- a/examples/concurrent_executions/CMakeLists.txt
+++ b/examples/concurrent_executions/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/date_time/CMakeLists.txt b/examples/date_time/CMakeLists.txt
index 10aca827d..37e27431d 100644
--- a/examples/date_time/CMakeLists.txt
+++ b/examples/date_time/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/duration/CMakeLists.txt b/examples/duration/CMakeLists.txt
index 238ddbf66..cb20e6d66 100644
--- a/examples/duration/CMakeLists.txt
+++ b/examples/duration/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/execution_profiles/CMakeLists.txt b/examples/execution_profiles/CMakeLists.txt
index e11becbee..bcf6010e1 100644
--- a/examples/execution_profiles/CMakeLists.txt
+++ b/examples/execution_profiles/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/host_listener/CMakeLists.txt b/examples/host_listener/CMakeLists.txt
index ab77f645f..ff8d0d64c 100644
--- a/examples/host_listener/CMakeLists.txt
+++ b/examples/host_listener/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/logging/CMakeLists.txt b/examples/logging/CMakeLists.txt
index ab7240f28..105618c4d 100644
--- a/examples/logging/CMakeLists.txt
+++ b/examples/logging/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/maps/CMakeLists.txt b/examples/maps/CMakeLists.txt
index f6e2677b6..d2ae7f72c 100644
--- a/examples/maps/CMakeLists.txt
+++ b/examples/maps/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/named_parameters/CMakeLists.txt b/examples/named_parameters/CMakeLists.txt
index 02310f092..cc52df5e6 100644
--- a/examples/named_parameters/CMakeLists.txt
+++ b/examples/named_parameters/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/paging/CMakeLists.txt b/examples/paging/CMakeLists.txt
index 4f56b7b23..03830d6b0 100644
--- a/examples/paging/CMakeLists.txt
+++ b/examples/paging/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/perf/CMakeLists.txt b/examples/perf/CMakeLists.txt
index b7dc4fa03..b754dba4f 100644
--- a/examples/perf/CMakeLists.txt
+++ b/examples/perf/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/prepared/CMakeLists.txt b/examples/prepared/CMakeLists.txt
index b8154777b..882211f01 100644
--- a/examples/prepared/CMakeLists.txt
+++ b/examples/prepared/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/schema_meta/CMakeLists.txt b/examples/schema_meta/CMakeLists.txt
index 059c27ae6..2fac2964c 100644
--- a/examples/schema_meta/CMakeLists.txt
+++ b/examples/schema_meta/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/simple/CMakeLists.txt b/examples/simple/CMakeLists.txt
index 8210ce63b..869730a97 100644
--- a/examples/simple/CMakeLists.txt
+++ b/examples/simple/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/ssl/CMakeLists.txt b/examples/ssl/CMakeLists.txt
index 5865ddecb..c08175135 100644
--- a/examples/ssl/CMakeLists.txt
+++ b/examples/ssl/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/tracing/CMakeLists.txt b/examples/tracing/CMakeLists.txt
index 5c9ca583b..0053c4f81 100644
--- a/examples/tracing/CMakeLists.txt
+++ b/examples/tracing/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/tuple/CMakeLists.txt b/examples/tuple/CMakeLists.txt
index d1645eecf..218f7f016 100644
--- a/examples/tuple/CMakeLists.txt
+++ b/examples/tuple/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/udt/CMakeLists.txt b/examples/udt/CMakeLists.txt
index 94f2ab8d9..7a3fb5046 100644
--- a/examples/udt/CMakeLists.txt
+++ b/examples/udt/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/examples/uuids/CMakeLists.txt b/examples/uuids/CMakeLists.txt
index 5659381e2..8302cedc8 100644
--- a/examples/uuids/CMakeLists.txt
+++ b/examples/uuids/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
- TARGET ${PROJECT_EXAMPLE_NAME}
- APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS})
set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples")
diff --git a/gtests/CMakeLists.txt b/gtests/CMakeLists.txt
index 87389271c..b70790d4f 100644
--- a/gtests/CMakeLists.txt
+++ b/gtests/CMakeLists.txt
@@ -98,7 +98,10 @@ if(CASS_BUILD_INTEGRATION_TESTS)
${PROJECT_LIB_NAME_TARGET})
set_property(TARGET ${INTEGRATION_TESTS_NAME} PROPERTY PROJECT_LABEL ${INTEGRATION_TESTS_DISPLAY_NAME})
set_property(TARGET ${INTEGRATION_TESTS_NAME} PROPERTY FOLDER "Tests")
- set_property(TARGET ${INTEGRATION_TESTS_NAME} APPEND PROPERTY COMPILE_FLAGS ${TEST_CXX_FLAGS})
+ # Enable bigobj for large object files during compilation (Cassandra types integration test)
+ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+ set_property(TARGET ${INTEGRATION_TESTS_NAME} APPEND PROPERTY COMPILE_FLAGS "/bigobj")
+ endif()
if(LIBSSH2_LIBRARY_NAME)
add_dependencies(${INTEGRATION_TESTS_NAME} ${LIBSSH2_LIBRARY_NAME})
endif()
@@ -124,5 +127,10 @@ endif()
# Unit test executable
#------------------------------
if(CASS_BUILD_UNIT_TESTS)
- GtestUnitTests("cassandra" "" "" "${CASS_EXCLUDED_UNIT_TEST_FILES}")
+ # Add the ability to create zip files inside the unit tests
+ set(MINIZIP_INCLUDE_DIR "${CASS_SRC_DIR}/third_party/minizip")
+ set(MINIZIP_SOURCE_FILES ${MINIZIP_INCLUDE_DIR}/ioapi.c
+ ${MINIZIP_INCLUDE_DIR}/zip.c)
+
+ GtestUnitTests("cassandra" "${MINIZIP_SOURCE_FILES}" "${MINIZIP_INCLUDE_DIR}" "${CASS_EXCLUDED_UNIT_TEST_FILES}")
endif()
diff --git a/gtests/src/integration/driver_utils.cpp b/gtests/src/integration/driver_utils.cpp
index b507d3e87..b4134a4c7 100644
--- a/gtests/src/integration/driver_utils.cpp
+++ b/gtests/src/integration/driver_utils.cpp
@@ -50,13 +50,13 @@ unsigned int test::driver::internals::Utils::connect_timeout(CassCluster* cluste
std::string test::driver::internals::Utils::contact_points(CassCluster* cluster) {
std::string contact_points;
- const ContactPointList& contact_points_list = cluster->config().contact_points();
- for (ContactPointList::const_iterator it = contact_points_list.begin();
- it != contact_points_list.end(); ++it) {
+ const AddressVec& contact_points_list = cluster->config().contact_points();
+ for (AddressVec::const_iterator it = contact_points_list.begin(); it != contact_points_list.end();
+ ++it) {
if (contact_points.size() > 0) {
contact_points.push_back(',');
}
- contact_points.append((*it).c_str());
+ contact_points.append((*it).hostname_or_address().c_str());
}
return contact_points;
}
@@ -73,7 +73,17 @@ std::string test::driver::internals::Utils::host(CassFuture* future) {
if (future) {
 Future* cass_future = static_cast<Future*>(future);
 if (cass_future->type() == Future::FUTURE_TYPE_RESPONSE) {
- return static_cast<ResponseFuture*>(cass_future)->address().to_string().c_str();
+ return static_cast<ResponseFuture*>(cass_future)->address().hostname_or_address().c_str();
+ }
+ }
+ return "";
+}
+
+std::string test::driver::internals::Utils::server_name(CassFuture* future) {
+ if (future) {
+ Future* cass_future = static_cast<Future*>(future);
+ if (cass_future->type() == Future::FUTURE_TYPE_RESPONSE) {
+ return static_cast<ResponseFuture*>(cass_future)->address().server_name().c_str();
}
}
return "";
diff --git a/gtests/src/integration/driver_utils.hpp b/gtests/src/integration/driver_utils.hpp
index f86620ef4..d3808c4ff 100644
--- a/gtests/src/integration/driver_utils.hpp
+++ b/gtests/src/integration/driver_utils.hpp
@@ -73,6 +73,14 @@ class Utils {
*/
static std::string host(CassFuture* future);
+ /**
+ * Get the server name of the future
+ *
+ * @param future Future to retrieve server name from
+ * @return Server name
+ */
+ static std::string server_name(CassFuture* future);
+
/**
* Get the Murmur3 hash for a given value
*
diff --git a/gtests/src/integration/integration.cpp b/gtests/src/integration/integration.cpp
index 3d08f52ab..b6cd6c2b6 100644
--- a/gtests/src/integration/integration.cpp
+++ b/gtests/src/integration/integration.cpp
@@ -52,9 +52,11 @@ Integration::Integration()
, is_with_vnodes_(false)
, is_randomized_contact_points_(false)
, is_schema_metadata_(false)
+ , is_ccm_requested_(true)
, is_ccm_start_requested_(true)
, is_ccm_start_node_individually_(false)
, is_session_requested_(true)
+ , is_keyspace_change_requested_(true)
, is_test_chaotic_(false)
, is_beta_protocol_(Options::is_beta_protocol())
, protocol_version_(CASS_HIGHEST_SUPPORTED_PROTOCOL_VERSION)
@@ -63,7 +65,7 @@ Integration::Integration()
// Determine if the schema keyspaces table should be updated
// TODO: Make cass_version (and dse_version) available for all tests
CCM::CassVersion cass_version = server_version_;
- if (Options::is_dse()) {
+ if (!Options::is_cassandra()) {
 cass_version = static_cast<CCM::DseVersion>(cass_version).get_cass_version();
}
if (cass_version >= "3.0.0") {
@@ -138,47 +140,49 @@ void Integration::SetUp() {
data_center_nodes.push_back(number_dc1_nodes_);
data_center_nodes.push_back(number_dc2_nodes_);
- try {
- // Create and start the CCM cluster (if not already created)
- ccm_ = new CCM::Bridge(
- server_version_, Options::use_git(), Options::branch_tag(), Options::use_install_dir(),
- Options::install_dir(), Options::is_dse(), dse_workload_, Options::cluster_prefix(),
- Options::dse_credentials(), Options::dse_username(), Options::dse_password(),
- Options::deployment_type(), Options::authentication_type(), Options::host(),
- Options::port(), Options::username(), Options::password(), Options::public_key(),
- Options::private_key(), Options::is_verbose_ccm());
- if (ccm_->create_cluster(data_center_nodes, is_with_vnodes_, is_password_authenticator_,
- is_ssl_, is_client_authentication_)) {
- if (is_ccm_start_requested_) {
- if (is_ccm_start_node_individually_) {
- for (unsigned short node = 1; node <= (number_dc1_nodes_ + number_dc2_nodes_); ++node) {
+ if (is_ccm_requested_) {
+ try {
+ // Create and start the CCM cluster (if not already created)
+ ccm_ = new CCM::Bridge(
+ server_version_, Options::use_git(), Options::branch_tag(), Options::use_install_dir(),
+ Options::install_dir(), Options::server_type(), dse_workload_, Options::cluster_prefix(),
+ Options::dse_credentials(), Options::dse_username(), Options::dse_password(),
+ Options::deployment_type(), Options::authentication_type(), Options::host(),
+ Options::port(), Options::username(), Options::password(), Options::public_key(),
+ Options::private_key(), Options::is_verbose_ccm());
+ if (ccm_->create_cluster(data_center_nodes, is_with_vnodes_, is_password_authenticator_,
+ is_ssl_, is_client_authentication_)) {
+ if (is_ccm_start_requested_) {
+ if (is_ccm_start_node_individually_) {
+ for (unsigned short node = 1; node <= (number_dc1_nodes_ + number_dc2_nodes_); ++node) {
+ if (is_password_authenticator_) {
+ ccm_->start_node(node, "-Dcassandra.superuser_setup_delay_ms=0");
+ } else {
+ ccm_->start_node(node);
+ }
+ }
+ } else {
if (is_password_authenticator_) {
- ccm_->start_node(node, "-Dcassandra.superuser_setup_delay_ms=0");
+ ccm_->start_cluster("-Dcassandra.superuser_setup_delay_ms=0");
} else {
- ccm_->start_node(node);
+ ccm_->start_cluster();
}
}
- } else {
- if (is_password_authenticator_) {
- ccm_->start_cluster("-Dcassandra.superuser_setup_delay_ms=0");
- } else {
- ccm_->start_cluster();
- }
}
}
- }
- // Generate the default contact points
- contact_points_ =
- generate_contact_points(ccm_->get_ip_prefix(), number_dc1_nodes_ + number_dc2_nodes_);
+ // Generate the default contact points
+ contact_points_ =
+ generate_contact_points(ccm_->get_ip_prefix(), number_dc1_nodes_ + number_dc2_nodes_);
- // Determine if the session connection should be established
- if (is_session_requested_ && is_ccm_start_requested_) {
- connect();
+ // Determine if the session connection should be established
+ if (is_session_requested_ && is_ccm_start_requested_) {
+ connect();
+ }
+ } catch (CCM::BridgeException be) {
+ // Issue creating the CCM bridge instance (force failure)
+ FAIL() << be.what();
}
- } catch (CCM::BridgeException be) {
- // Issue creating the CCM bridge instance (force failure)
- FAIL() << be.what();
}
}
@@ -206,7 +210,9 @@ void Integration::TearDown() {
// Determine if the CCM cluster should be destroyed
if (is_test_chaotic_) {
// Destroy the current cluster and reset the chaos flag for the next test
- ccm_->remove_cluster();
+ if (!Options::keep_clusters()) {
+ ccm_->remove_cluster();
+ }
is_test_chaotic_ = false;
}
}
@@ -295,6 +301,16 @@ void Integration::drop_type(const std::string& type_name) {
session_.execute(drop_type_query.str(), CASS_CONSISTENCY_ANY, false, false);
}
+bool Integration::use_keyspace(const std::string& keyspace_name) {
+ std::stringstream use_keyspace_query;
+ use_keyspace_query << "USE " << keyspace_name;
+ session_.execute(use_keyspace_query.str());
+ if (this->HasFailure()) {
+ return false;
+ }
+ return true;
+}
+
void Integration::connect(Cluster cluster) {
// Establish the session connection
cluster_ = cluster;
@@ -303,6 +319,10 @@ void Integration::connect(Cluster cluster) {
// Update the server version if branch_tag was specified
if (Options::use_git() && !Options::branch_tag().empty()) {
+ if (Options::is_ddac()) {
+ FAIL() << "Unable to build DDAC from Branch/Tag";
+ return;
+ }
if (Options::is_dse()) {
server_version_ = ccm_->get_dse_version();
} else {
@@ -317,9 +337,9 @@ void Integration::connect(Cluster cluster) {
CHECK_FAILURE;
// Update the session to use the new keyspace by default
- std::stringstream use_keyspace_query;
- use_keyspace_query << "USE " << keyspace_name_;
- session_.execute(use_keyspace_query.str());
+ if (is_keyspace_change_requested_) {
+ use_keyspace(keyspace_name_);
+ }
}
void Integration::connect() {
diff --git a/gtests/src/integration/integration.hpp b/gtests/src/integration/integration.hpp
index c7d7a9734..a4f695d33 100644
--- a/gtests/src/integration/integration.hpp
+++ b/gtests/src/integration/integration.hpp
@@ -37,11 +37,13 @@
// Macros for grouping tests together
#define GROUP_TEST_F(group_name, test_case, test_name) TEST_F(test_case, group_name##_##test_name)
+#define GROUP_TEST(group_name, test_case, test_name) TEST(test_case, group_name##_##test_name)
#define GROUP_TYPED_TEST_P(group_name, test_case, test_name) \
TYPED_TEST_P(test_case, group_name##_##test_name)
// Macros to use for grouping integration tests together
-#define GROUP_INTEGRATION_TEST(server_type) GROUP_CONCAT(Integration, server_type)
+#define INTEGRATION_TEST(server_type, test_case, test_name) \
+ GROUP_TEST(Integration##_##server_type, test_case, test_name)
#define INTEGRATION_TEST_F(server_type, test_case, test_name) \
GROUP_TEST_F(Integration##_##server_type, test_case, test_name)
#define INTEGRATION_TYPED_TEST_P(server_type, test_case, test_name) \
@@ -52,7 +54,8 @@
GROUP_TYPED_TEST_P(DISABLED##_##Integration##_##server_type, test_case, est_name)
// Macros to use for grouping Cassandra integration tests together
-#define CASSANDRA_TEST_NAME(test_name) Integration##_##Cassandra##_##test_name
+#define CASSANDRA_INTEGRATION_TEST(test_case, test_name) \
+ INTEGRATION_TEST(Cassandra, test_case, test_name)
#define CASSANDRA_INTEGRATION_TEST_F(test_case, test_name) \
INTEGRATION_TEST_F(Cassandra, test_case, test_name)
#define CASSANDRA_INTEGRATION_TYPED_TEST_P(test_case, test_name) \
@@ -83,7 +86,7 @@
#define CHECK_VERSION(version) \
do { \
CCM::CassVersion cass_version = this->server_version_; \
- if (Options::is_dse()) { \
+ if (!Options::is_cassandra()) { \
cass_version = static_cast<CCM::DseVersion>(cass_version).get_cass_version(); \
} \
if (cass_version < #version) { \
@@ -98,7 +101,7 @@
#define CHECK_VALUE_TYPE_VERSION(type) \
CCM::CassVersion cass_version = this->server_version_; \
- if (Options::is_dse()) { \
+ if (!Options::is_cassandra()) { \
cass_version = static_cast<CCM::DseVersion>(cass_version).get_cass_version(); \
} \
if (cass_version < type::supported_server_version()) { \
@@ -107,8 +110,12 @@
#define CHECK_CONTINUE(flag, message) ASSERT_TRUE(flag) << message;
-#define CASSANDRA_KEY_VALUE_TABLE_FORMAT "CREATE TABLE %s (key %s PRIMARY KEY, value %s)"
+#define CASSANDRA_KEY_VALUE_TABLE_FORMAT \
+ "CREATE TABLE IF NOT EXISTS %s (key %s PRIMARY KEY, value %s)"
+#define CASSANDRA_KEY_VALUE_QUALIFIED_TABLE_FORMAT \
+ "CREATE TABLE IF NOT EXISTS %s.%s (key %s PRIMARY KEY, value %s)"
#define CASSANDRA_KEY_VALUE_INSERT_FORMAT "INSERT INTO %s (key, value) VALUES(%s, %s)"
+#define CASSANDRA_KEY_VALUE_QUALIFIED_INSERT_FORMAT "INSERT INTO %s.%s (key, value) VALUES(%s, %s)"
#define CASSANDRA_SELECT_VALUE_FORMAT "SELECT value FROM %s WHERE key=%s"
#define CASSANDRA_DELETE_ROW_FORMAT "DELETE FROM %s WHERE key=%s"
#define CASSANDRA_UPDATE_VALUE_FORMAT "UPDATE %s SET value=%s WHERE key=%s"
@@ -252,6 +259,12 @@ class Integration : public testing::Test {
* (DEFAULT: false)
*/
bool is_schema_metadata_;
+ /**
+ * Setting to determine if CCM instance should be created. True if CCM instance
+ * should be created; false otherwise.
+ * (DEFAULT: true)
+ */
+ bool is_ccm_requested_;
/**
* Setting to determine if CCM cluster should be started. True if CCM cluster
* should be started; false otherwise.
@@ -272,6 +285,11 @@ class Integration : public testing::Test {
* (DEFAULT: true)
*/
bool is_session_requested_;
+ /**
+ * Flag to indicate if the newly created keyspace should be set for the session connection.
+ * (DEFAULT: true)
+ */
+ bool is_keyspace_change_requested_;
/**
* Flag to indicate if a test is chaotic and should have its CCM cluster
* destroyed
@@ -369,6 +387,14 @@ class Integration : public testing::Test {
*/
virtual void drop_type(const std::string& type_name);
+ /**
+ * Update the current keyspace used by the session
+ *
+ * @param keyspace_name Keyspace to use
+ * @return True if keyspace was changed; false otherwise
+ */
+ virtual bool use_keyspace(const std::string& keyspace_name);
+
/**
* Establish the session connection using provided cluster object.
*
diff --git a/gtests/src/integration/main.cpp b/gtests/src/integration/main.cpp
index c9b2bcdd4..6efbdfe50 100644
--- a/gtests/src/integration/main.cpp
+++ b/gtests/src/integration/main.cpp
@@ -18,6 +18,7 @@
#include "bridge.hpp"
#include "options.hpp"
+#include "ssl.hpp"
#include "win_debug.hpp"
#include "cassandra.h"
@@ -25,6 +26,8 @@
#include
+using datastax::internal::core::SslContextFactory;
+
/**
* Bootstrap listener for handling start and end of the integration tests.
*/
@@ -65,6 +68,10 @@ class BootstrapListener : public testing::EmptyTestEventListener {
}
}
+ void OnTestStart(const testing::TestInfo& test_information) { SslContextFactory::init(); }
+
+ void OnTestEnd(const testing::TestInfo& test_information) { SslContextFactory::cleanup(); }
+
private:
/**
* Current category
@@ -106,11 +113,7 @@ std::string generate_filter(TestCategory category, const std::string& base_filte
int main(int argc, char* argv[]) {
// Initialize the Google testing framework
testing::InitGoogleTest(&argc, argv);
-
- // Add a bootstrap mechanism for program start and finish
- BootstrapListener* listener = NULL;
testing::TestEventListeners& listeners = testing::UnitTest::GetInstance()->listeners();
- listeners.Append(listener = new BootstrapListener());
#if defined(_WIN32) && defined(_DEBUG)
// Add the memory leak checking to the listener callbacks
@@ -121,6 +124,10 @@ int main(int argc, char* argv[]) {
#endif
#endif
+ // Add a bootstrap mechanism for program start and finish
+ BootstrapListener* listener = NULL;
+ listeners.Append(listener = new BootstrapListener());
+
// Initialize the options for the integration test
if (Options::initialize(argc, argv)) {
// Run the integration tests from each applicable category
diff --git a/gtests/src/integration/objects/future.hpp b/gtests/src/integration/objects/future.hpp
index b9e87c4a7..8df887f86 100644
--- a/gtests/src/integration/objects/future.hpp
+++ b/gtests/src/integration/objects/future.hpp
@@ -97,6 +97,13 @@ class Future : public Object {
*/
const std::string host() { return internals::Utils::host(get()); }
+ /**
+ * Get the server name of the future
+ *
+ * @return Server name
+ */
+ const std::string server_name() { return internals::Utils::server_name(get()); }
+
/**
* Get the result from the future
*
diff --git a/gtests/src/integration/objects/result.hpp b/gtests/src/integration/objects/result.hpp
index da70ad06d..0ac69a649 100644
--- a/gtests/src/integration/objects/result.hpp
+++ b/gtests/src/integration/objects/result.hpp
@@ -113,6 +113,13 @@ class Result : public Object {
*/
const std::string host() { return future_.host(); }
+ /**
+ * Get the server name of the future
+ *
+ * @return Server name
+ */
+ const std::string server_name() { return future_.server_name(); }
+
/**
* Get the number of columns from the result
*
diff --git a/gtests/src/integration/options.cpp b/gtests/src/integration/options.cpp
index fc371c7da..ab3ca1a55 100644
--- a/gtests/src/integration/options.cpp
+++ b/gtests/src/integration/options.cpp
@@ -24,7 +24,8 @@
#include
#define DEFAULT_OPTIONS_CASSSANDRA_VERSION CCM::CassVersion("3.11.4")
-#define DEFAULT_OPTIONS_DSE_VERSION CCM::DseVersion("6.0.8")
+#define DEFAULT_OPTIONS_DSE_VERSION CCM::DseVersion("6.7.5")
+#define DEFAULT_OPTIONS_DDAC_VERSION CCM::DseVersion("5.1.17")
// Initialize the defaults for all the options
bool Options::is_initialized_ = false;
@@ -32,7 +33,6 @@ bool Options::is_help_ = false;
bool Options::is_keep_clusters_ = false;
bool Options::is_log_tests_ = true;
CCM::CassVersion Options::server_version_ = DEFAULT_OPTIONS_CASSSANDRA_VERSION;
-bool Options::is_dse_ = false;
bool Options::use_git_ = false;
std::string Options::branch_tag_;
bool Options::use_install_dir_ = false;
@@ -55,6 +55,7 @@ CCM::DseCredentialsType Options::dse_credentials_type_;
CCM::AuthenticationType Options::authentication_type_;
CCM::DeploymentType Options::deployment_type_;
+std::set<TestCategory> Options::categories_;
+CCM::ServerType Options::server_type_;
bool Options::initialize(int argc, char* argv[]) {
// Only allow initialization to occur once
@@ -63,6 +64,7 @@ bool Options::initialize(int argc, char* argv[]) {
dse_credentials_type_ = CCM::DseCredentialsType::USERNAME_PASSWORD;
authentication_type_ = CCM::AuthenticationType::USERNAME_PASSWORD;
deployment_type_ = CCM::DeploymentType::LOCAL;
+ server_type_ = CCM::ServerType::CASSANDRA;
// Check for the help argument first (keeps defaults for help display)
for (int i = 1; i < argc; ++i) {
@@ -77,6 +79,8 @@ bool Options::initialize(int argc, char* argv[]) {
for (int i = 1; i < argc; ++i) {
if (std::string(argv[i]) == "--dse") {
server_version_ = DEFAULT_OPTIONS_DSE_VERSION;
+ } else if (std::string(argv[i]) == "--ddac") {
+ server_version_ = DEFAULT_OPTIONS_DDAC_VERSION;
}
}
@@ -107,7 +111,9 @@ bool Options::initialize(int argc, char* argv[]) {
<< std::endl;
}
} else if (key == "--dse") {
- is_dse_ = true;
+ server_type_ = CCM::ServerType::DSE;
+ } else if (key == "--ddac") {
+ server_type_ = CCM::ServerType::DDAC;
} else if (key == "--dse-username") {
if (!value.empty()) {
dse_username_ = value;
@@ -129,7 +135,7 @@ bool Options::initialize(int argc, char* argv[]) {
}
}
if (!is_found) {
- std::cerr << "Invalid DSE Credentials Type: Using default "
+ std::cerr << "Invalid DSE/DDAC Credentials Type: Using default "
<< dse_credentials_type_.to_string() << std::endl;
}
} else if (key == "--git") {
@@ -272,7 +278,7 @@ bool Options::initialize(int argc, char* argv[]) {
for (TestCategory::iterator iterator = TestCategory::begin(); iterator != TestCategory::end();
++iterator) {
// Only add the DSE test category if DSE is enabled
- if (*iterator != TestCategory::DSE || is_dse_) {
+ if (*iterator != TestCategory::DSE || is_dse()) {
categories_.insert(*iterator);
} else {
std::cerr << "DSE Category Will be Ignored: DSE is not enabled [--dse]" << std::endl;
@@ -282,11 +288,11 @@ bool Options::initialize(int argc, char* argv[]) {
if (deployment_type_ == CCM::DeploymentType::LOCAL) {
host_ = "127.0.0.1";
}
- if (is_dse_ && !use_install_dir_) {
- // Determine if the DSE credentials type should be updated
+ if (!is_cassandra() && !use_install_dir_) {
+ // Determine if the DSE/DDAC credentials type should be updated
if (dse_credentials_type_ == CCM::DseCredentialsType::USERNAME_PASSWORD) {
if (dse_username_.empty() || dse_password_.empty()) {
- std::cerr << "Invalid Username and/or Password: Default to INI_FILE DSE credentials"
+ std::cerr << "Invalid Username and/or Password: Default to INI_FILE DSE/DDAC credentials"
<< std::endl;
dse_credentials_type_ = CCM::DseCredentialsType::INI_FILE;
}
@@ -309,10 +315,11 @@ void Options::print_help() {
std::cout << std::endl << "CCM Options:" << std::endl;
std::cout << " --version=[VERSION]" << std::endl
<< " "
- << "Cassandra/DSE version to use." << std::endl
+ << "Cassandra/DSE/DDAC version to use." << std::endl
<< " Default:" << std::endl
<< " Cassandra Version: " << server_version().to_string() << std::endl
- << " DSE Version: " << DEFAULT_OPTIONS_DSE_VERSION.to_string() << std::endl;
+ << " DSE Version: " << DEFAULT_OPTIONS_DSE_VERSION.to_string() << std::endl
+ << " DDAC Version: " << DEFAULT_OPTIONS_DDAC_VERSION.to_string() << std::endl;
std::string categories;
for (TestCategory::iterator iterator = TestCategory::begin(); iterator != TestCategory::end();
++iterator) {
@@ -330,16 +337,20 @@ void Options::print_help() {
std::cout << " --dse" << std::endl
<< " "
<< "Indicate server version supplied is DSE." << std::endl;
+ std::cout << " --ddac" << std::endl
+ << " "
+ << "Indicate server version supplied is DDAC." << std::endl;
std::cout << " --dse-credentials=(USERNAME_PASSWORD|INI_FILE)" << std::endl
<< " "
- << "DSE credentials to use for download authentication. The default is " << std::endl
+ << "DSE/DDAC credentials to use for download authentication. The default is "
+ << std::endl
<< " " << dse_credentials().to_string() << "." << std::endl;
std::cout << " --dse-username=[USERNAME]" << std::endl
<< " "
- << "Username to use for DSE download authentication." << std::endl;
+ << "Username to use for DSE/DDAC download authentication." << std::endl;
std::cout << " --dse-password=[PASSWORD]" << std::endl
<< " "
- << "Password to use for DSE download authentication." << std::endl;
+ << "Password to use for DSE/DDAC download authentication." << std::endl;
std::cout << " --git" << std::endl
<< " "
<< "Indicate Cassandra/DSE server download should be obtained from" << std::endl
@@ -408,18 +419,20 @@ void Options::print_settings() {
if (log_tests()) {
std::cout << " Logging driver messages" << std::endl;
}
- if (is_dse()) {
- std::cout << " DSE Version: " << CCM::DseVersion(server_version()).to_string() << std::endl;
+ if (!is_cassandra()) {
+ std::cout << " " << server_type_.to_string()
+ << " Version: " << CCM::DseVersion(server_version()).to_string() << std::endl;
if (!use_install_dir()) {
if (dse_credentials() == CCM::DseCredentialsType::USERNAME_PASSWORD) {
std::cout << " Username: " << dse_username() << std::endl;
std::cout << " Password: " << dse_password() << std::endl;
} else {
- std::cout << " Using INI file for DSE download authentication" << std::endl;
+ std::cout << " Using INI file for DSE/DDAC download authentication" << std::endl;
}
}
} else {
- std::cout << " Cassandra Version: " << server_version().to_string() << std::endl;
+ std::cout << " " << server_type_.to_string() << " Version: " << server_version().to_string()
+ << std::endl;
}
if (use_install_dir()) {
std::cout << " Using installation directory [" << install_dir() << "]" << std::endl;
@@ -456,7 +469,13 @@ bool Options::log_tests() { return is_log_tests_; }
CCM::CassVersion Options::server_version() { return server_version_; }
-bool Options::is_dse() { return is_dse_; }
+CCM::ServerType Options::server_type() { return server_type_; }
+
+bool Options::is_cassandra() { return server_type_ == CCM::ServerType::CASSANDRA; }
+
+bool Options::is_dse() { return server_type_ == CCM::ServerType::DSE; }
+
+bool Options::is_ddac() { return server_type_ == CCM::ServerType::DDAC; }
CCM::DseCredentialsType Options::dse_credentials() {
// Static initialization cannot be guaranteed
@@ -514,7 +533,7 @@ const std::string& Options::private_key() { return private_key_; }
SharedPtr<CCM::Bridge, StdDeleter<CCM::Bridge> > Options::ccm() {
return new CCM::Bridge(Options::server_version(), Options::use_git(), Options::branch_tag(),
- Options::use_install_dir(), Options::install_dir(), Options::is_dse(),
+ Options::use_install_dir(), Options::install_dir(), Options::server_type(),
CCM::Bridge::DEFAULT_DSE_WORKLOAD, Options::cluster_prefix(),
Options::dse_credentials(), Options::dse_username(),
Options::dse_password(), Options::deployment_type(),
diff --git a/gtests/src/integration/options.hpp b/gtests/src/integration/options.hpp
index fa1600b35..95f93e6d0 100644
--- a/gtests/src/integration/options.hpp
+++ b/gtests/src/integration/options.hpp
@@ -67,17 +67,35 @@ class Options {
*/
static bool log_tests();
/**
- * Get the server version (Cassandra/DSE) to use
+ * Get the server version (Cassandra/DSE/DDAC) to use
*
- * @return Cassandra/DSE version to use
+ * @return Cassandra/DSE/DDAC version to use
*/
static CCM::CassVersion server_version();
+ /**
+ * Get the server type (Cassandra/DSE/DDAC)
+ *
+ * @return Server type
+ */
+ static CCM::ServerType server_type();
+ /**
+ * Flag to determine if Cassandra should be used or not
+ *
+ * @return True if Cassandra should be used; false otherwise
+ */
+ static bool is_cassandra();
/**
* Flag to determine if DSE should be used or not
*
* @return True if DSE should be used; false otherwise
*/
static bool is_dse();
+ /**
+ * Flag to determine if DDAC should be used or not
+ *
+ * @return True if DDAC should be used; false otherwise
+ */
+ static bool is_ddac();
/**
* Get the DSE credentials type (username|password/INI file)
*
@@ -240,13 +258,13 @@ class Options {
*/
static bool is_log_tests_;
/**
- * Server version to use (Cassandra/DSE)
+ * Server version to use (Cassandra/DSE/DDAC)
*/
static CCM::CassVersion server_version_;
/**
- * Flag to indicate if DSE should be used instead of Cassandra
+ * Server type to use
*/
- static bool is_dse_;
+ static CCM::ServerType server_type_;
/**
* Flag to determine if Cassandra should be built from ASF git (github if DSE)
*/
diff --git a/gtests/src/integration/rest_client.cpp b/gtests/src/integration/rest_client.cpp
index 2e36de389..7e931997b 100644
--- a/gtests/src/integration/rest_client.cpp
+++ b/gtests/src/integration/rest_client.cpp
@@ -80,7 +80,8 @@ const Response RestClient::send_request(const Request& request) {
// Start the request and attach the HTTP request to send to the REST server
uv_connect_t connect;
connect.data = &http_request;
- uv_tcp_connect(&connect, &tcp, address.addr(), handle_connected);
+ Address::SocketStorage storage;
+ uv_tcp_connect(&connect, &tcp, address.to_sockaddr(&storage), handle_connected);
uv_run(&loop, UV_RUN_DEFAULT);
uv_loop_close(&loop);
diff --git a/gtests/src/integration/simulacron/simulacron_cluster.cpp b/gtests/src/integration/simulacron/simulacron_cluster.cpp
index 31632dc52..e9f52cb97 100644
--- a/gtests/src/integration/simulacron/simulacron_cluster.cpp
+++ b/gtests/src/integration/simulacron/simulacron_cluster.cpp
@@ -66,7 +66,7 @@ test::SimulacronCluster::SimulacronCluster()
// Determine the release version (for priming nodes)
CCM::CassVersion cassandra_version = Options::server_version();
- if (Options::is_dse()) {
+ if (!Options::is_cassandra()) {
CCM::DseVersion dse_version(cassandra_version);
cassandra_version = dse_version.get_cass_version();
if (cassandra_version == "0.0.0") {
@@ -127,7 +127,7 @@ void test::SimulacronCluster::create_cluster(
}
// Add the DSE version (if applicable)
- if (Options::is_dse()) {
+ if (!Options::is_cassandra()) {
paramters << "&dse_version=" << dse_version_;
cluster_name << dse_version_;
} else {
diff --git a/gtests/src/integration/simulacron/simulacron_integration.hpp b/gtests/src/integration/simulacron/simulacron_integration.hpp
index 03b6c69ba..8b03a7419 100644
--- a/gtests/src/integration/simulacron/simulacron_integration.hpp
+++ b/gtests/src/integration/simulacron/simulacron_integration.hpp
@@ -22,11 +22,12 @@
#include
// Macros to use for grouping Simulacron integration tests together
-#define SIMULACRON_TEST_NAME(test_name) Integration##_##simulacron##_##test_name
+#define SIMULACRON_INTEGRATION_TEST(test_case, test_name) \
+ INTEGRATION_TEST(Simulacron, test_case, test_name)
#define SIMULACRON_INTEGRATION_TEST_F(test_case, test_name) \
- INTEGRATION_TEST_F(simulacron, test_case, test_name)
+ INTEGRATION_TEST_F(Simulacron, test_case, test_name)
#define SIMULACRON_INTEGRATION_TYPED_TEST_P(test_case, test_name) \
- INTEGRATION_TYPED_TEST_P(simulacron, test_case, test_name)
+ INTEGRATION_TYPED_TEST_P(Simulacron, test_case, test_name)
#define CHECK_SIMULACRON_AVAILABLE \
if (!sc_) { \
diff --git a/gtests/src/integration/test_category.cpp b/gtests/src/integration/test_category.cpp
index 50990b1a0..9c22031b4 100644
--- a/gtests/src/integration/test_category.cpp
+++ b/gtests/src/integration/test_category.cpp
@@ -23,7 +23,7 @@
const TestCategory TestCategory::CASSANDRA("CASSANDRA", 0, "Cassandra", "*_Cassandra_*");
const TestCategory TestCategory::DSE("DSE", 1, "DataStax Enterprise", "*_DSE_*");
const TestCategory TestCategory::SIMULACRON("SIMULACRON", SHRT_MAX, "Simulated DSE (and Cassandra)",
- "*_simulacron_*");
+ "*_Simulacron_*");
// Static declarations for test type
std::set TestCategory::constants_;
diff --git a/gtests/src/integration/test_utils.cpp b/gtests/src/integration/test_utils.cpp
index b0ba3579d..26bff54fa 100644
--- a/gtests/src/integration/test_utils.cpp
+++ b/gtests/src/integration/test_utils.cpp
std::vector<std::string> test::Utils::explode(const std::string& input,
bool test::Utils::file_exists(const std::string& filename) {
uv_fs_t request;
- int error_code = uv_fs_open(NULL, &request, filename.c_str(), O_RDONLY, 0, NULL);
+ int error_code = uv_fs_stat(NULL, &request, filename.c_str(), NULL);
uv_fs_req_cleanup(&request);
- return error_code != UV_ENOENT;
+ return error_code == 0;
}
std::string test::Utils::indent(const std::string& input, unsigned int indent) {
@@ -274,3 +274,10 @@ bool test::Utils::wait_for_port(const std::string& ip_address, unsigned short po
// Unable to establish connection to node on port
return false;
}
+
+std::string test::Utils::home_directory() {
+ char home[FILE_PATH_SIZE] = { 0 };
+ size_t home_length = sizeof(home);
+ uv_os_homedir(home, &home_length);
+ return std::string(home, home_length);
+}
diff --git a/gtests/src/integration/test_utils.hpp b/gtests/src/integration/test_utils.hpp
index fdf28e80e..fa5641423 100644
--- a/gtests/src/integration/test_utils.hpp
+++ b/gtests/src/integration/test_utils.hpp
@@ -185,6 +185,13 @@ class Utils {
static bool wait_for_port(const std::string& ip_address, unsigned short port,
unsigned int number_of_retries = 100,
unsigned int retry_delay_ms = 100);
+
+ /**
+ * Get the home directory for the current user (not thread safe)
+ *
+ * @return Home directory
+ */
+ static std::string home_directory();
};
} // namespace test
diff --git a/gtests/src/integration/tests/test_auth.cpp b/gtests/src/integration/tests/test_auth.cpp
index f37b6d3ff..1671af4f8 100644
--- a/gtests/src/integration/tests/test_auth.cpp
+++ b/gtests/src/integration/tests/test_auth.cpp
@@ -200,7 +200,7 @@ CASSANDRA_INTEGRATION_TEST_F(AuthenticationTests, BadCredentials) {
// Add the proper logging criteria (based on server version)
CCM::CassVersion cass_version = this->server_version_;
- if (Options::is_dse()) {
+ if (!Options::is_cassandra()) {
cass_version = static_cast<CCM::DseVersion>(cass_version).get_cass_version();
}
if (cass_version >= "3.10") {
@@ -242,7 +242,7 @@ CASSANDRA_INTEGRATION_TEST_F(AuthenticationTests, AuthenticatorSetErrorNull) {
// Add the proper logging criteria (based on server version)
CCM::CassVersion cass_version = this->server_version_;
- if (Options::is_dse()) {
+ if (!Options::is_cassandra()) {
cass_version = static_cast<CCM::DseVersion>(cass_version).get_cass_version();
}
if (cass_version >= "3.10") {
diff --git a/gtests/src/integration/tests/test_basics.cpp b/gtests/src/integration/tests/test_basics.cpp
index 6145b4393..c8cf42d0d 100644
--- a/gtests/src/integration/tests/test_basics.cpp
+++ b/gtests/src/integration/tests/test_basics.cpp
@@ -334,7 +334,7 @@ CASSANDRA_INTEGRATION_TEST_F(BasicsTests, NoCompactEnabledConnection) {
CHECK_VERSION(3.0.16);
CHECK_VERSION(3.11.2);
CCM::CassVersion cass_version = server_version_;
- if (Options::is_dse()) {
+ if (!Options::is_cassandra()) {
if (server_version_ >= "6.0.0") {
SKIP_TEST("Unsupported for DataStax Enterprise Version "
<< server_version_.to_string()
diff --git a/gtests/src/integration/tests/test_cluster.cpp b/gtests/src/integration/tests/test_cluster.cpp
index 509d024bd..16aed5614 100644
--- a/gtests/src/integration/tests/test_cluster.cpp
+++ b/gtests/src/integration/tests/test_cluster.cpp
@@ -14,7 +14,12 @@
limitations under the License.
*/
-#include "objects/cluster.hpp"
+#include "integration.hpp"
+
+class ClusterTests : public Integration {
+public:
+ ClusterTests() { is_ccm_requested_ = false; }
+};
/**
* Set local dc to null for dc-aware lbp
@@ -23,7 +28,7 @@
* @test_category configuration
* @expected_result Error out because it is illegal to specify a null local-dc.
*/
-TEST(ClusterTest, SetLoadBalanceDcAwareNullLocalDc) {
+CASSANDRA_INTEGRATION_TEST_F(ClusterTests, SetLoadBalanceDcAwareNullLocalDc) {
test::driver::Cluster cluster;
EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS,
cass_cluster_set_load_balance_dc_aware(cluster.get(), NULL, 99, cass_false));
@@ -36,7 +41,7 @@ TEST(ClusterTest, SetLoadBalanceDcAwareNullLocalDc) {
* @test_category configuration
* @expected_result CASS_ERROR_LIB_BAD_PARAMS.
*/
-TEST(ClusterTest, ExponentialReconnectionPolicyBadParameters) {
+CASSANDRA_INTEGRATION_TEST_F(ClusterTests, ExponentialReconnectionPolicyBadParameters) {
test::driver::Cluster cluster;
// Base delay must be greater than 1
@@ -46,3 +51,17 @@ TEST(ClusterTest, ExponentialReconnectionPolicyBadParameters) {
// Base delay cannot be greater than max delay
EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, cass_cluster_set_exponential_reconnect(cluster.get(), 3, 2));
}
+
+/**
+ * Set invalid parameters for secure connect bundle.
+ *
+ * @jira_ticket CPP-790
+ * @test_category configuration
+ * @expected_result CASS_ERROR_LIB_BAD_PARAMS.
+ */
+CASSANDRA_INTEGRATION_TEST_F(ClusterTests, SecureConnectionBundleBadParameters) {
+ test::driver::Cluster cluster;
+
+ EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, cass_cluster_set_cloud_secure_connection_bundle_n(
+ cluster.get(), "invalid_filename", 16));
+}
diff --git a/gtests/src/integration/tests/test_config.cpp b/gtests/src/integration/tests/test_config.cpp
index 5df00665e..b981c6ca6 100644
--- a/gtests/src/integration/tests/test_config.cpp
+++ b/gtests/src/integration/tests/test_config.cpp
@@ -14,21 +14,21 @@
limitations under the License.
*/
-#include
+#include "integration.hpp"
-#include "cassandra.h"
+class ConfigTests : public Integration {
+public:
+ ConfigTests() { Integration::SetUp(); }
+};
-#include "driver_utils.hpp"
-#include "objects/cluster.hpp"
-
-TEST(ConfigTest, Options) {
+CASSANDRA_INTEGRATION_TEST_F(ConfigTests, Options) {
test::driver::Cluster cluster =
test::driver::Cluster::build().with_connect_timeout(9999u).with_port(7000);
EXPECT_EQ(9999u, test::driver::internals::Utils::connect_timeout(cluster.get()));
EXPECT_EQ(7000, test::driver::internals::Utils::port(cluster.get()));
}
-TEST(ConfigTest, ContactPointsSimple) {
+CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsSimple) {
std::string contact_points = "127.0.0.1,127.0.0.2,127.0.0.3";
test::driver::Cluster cluster =
test::driver::Cluster::build().with_contact_points(contact_points);
@@ -36,7 +36,7 @@ TEST(ConfigTest, ContactPointsSimple) {
test::driver::internals::Utils::contact_points(cluster.get()).c_str());
}
-TEST(ConfigTest, ContactPointsClear) {
+CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsClear) {
std::string contact_points = "127.0.0.1,127.0.0.2,127.0.0.3";
test::driver::Cluster cluster =
test::driver::Cluster::build().with_contact_points(contact_points);
@@ -46,7 +46,7 @@ TEST(ConfigTest, ContactPointsClear) {
EXPECT_TRUE(test::driver::internals::Utils::contact_points(cluster.get()).empty());
}
-TEST(ConfigTest, ContactPointsExtraCommas) {
+CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsExtraCommas) {
std::string contact_points = ",,,,127.0.0.1,,,,127.0.0.2,127.0.0.3,,,,";
test::driver::Cluster cluster =
test::driver::Cluster::build().with_contact_points(contact_points);
@@ -54,7 +54,7 @@ TEST(ConfigTest, ContactPointsExtraCommas) {
test::driver::internals::Utils::contact_points(cluster.get()).c_str());
}
-TEST(ConfigTest, ContactPointsExtraWhitespace) {
+CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsExtraWhitespace) {
std::string contact_points =
" ,\r\n, , , 127.0.0.1 ,,, ,\t127.0.0.2,127.0.0.3, \t\n, ,, ";
test::driver::Cluster cluster =
@@ -63,7 +63,7 @@ TEST(ConfigTest, ContactPointsExtraWhitespace) {
test::driver::internals::Utils::contact_points(cluster.get()).c_str());
}
-TEST(ConfigTest, ContactPointsAppend) {
+CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsAppend) {
test::driver::Cluster cluster = test::driver::Cluster::build().with_contact_points("127.0.0.1");
EXPECT_STREQ("127.0.0.1", test::driver::internals::Utils::contact_points(cluster.get()).c_str());
cluster.with_contact_points("127.0.0.2");
diff --git a/gtests/src/integration/tests/test_control_connection.cpp b/gtests/src/integration/tests/test_control_connection.cpp
index f10934d63..7cca81384 100644
--- a/gtests/src/integration/tests/test_control_connection.cpp
+++ b/gtests/src/integration/tests/test_control_connection.cpp
@@ -273,8 +273,10 @@ CASSANDRA_INTEGRATION_TEST_F(ControlConnectionTwoNodeClusterTests, Reconnection)
* and ensure only the first node is used as the contact point for automatic
* node discovery of the second node
*/
- Cluster cluster = default_cluster().with_load_balance_round_robin().with_contact_points(
- generate_contact_points(ccm_->get_ip_prefix(), 1));
+ Cluster cluster = default_cluster()
+ .with_load_balance_round_robin()
+ .with_constant_reconnect(100)
+ .with_contact_points(generate_contact_points(ccm_->get_ip_prefix(), 1));
Session session = cluster.connect();
// Stop the first node and bootstrap a third node into the cluster
@@ -554,7 +556,8 @@ CASSANDRA_INTEGRATION_TEST_F(ControlConnectionThreeNodeClusterTests, NodeDiscove
*/
CASSANDRA_INTEGRATION_TEST_F(ControlConnectionTests, FullOutage) {
CHECK_FAILURE;
- connect(); // Create the default session
+ Cluster cluster = default_cluster().with_constant_reconnect(100);
+ connect(cluster);
// Stop the cluster and attempt to perform a request
ccm_->stop_cluster();
@@ -567,12 +570,11 @@ CASSANDRA_INTEGRATION_TEST_F(ControlConnectionTests, FullOutage) {
for (unsigned short i = 0; i < cluster_ip_addresses.size(); ++i) {
nodes.insert(i + 1);
}
- reset_logger_criteria("Scheduling reconnect for host ", nodes);
+ reset_logger_criteria("reconnect for host ", nodes);
// Restart the cluster and wait for the nodes to reconnect
ccm_->start_cluster();
ASSERT_TRUE(wait_for_logger(nodes.size()));
- msleep(3000); // TODO: Remove static sleep and check driver logs for reduced wait
// Ensure all nodes are actively used
 std::set<unsigned short> expected_nodes;
diff --git a/gtests/src/integration/tests/test_dbaas.cpp b/gtests/src/integration/tests/test_dbaas.cpp
new file mode 100644
index 000000000..c82b5e10c
--- /dev/null
+++ b/gtests/src/integration/tests/test_dbaas.cpp
@@ -0,0 +1,746 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "integration.hpp"
+
+#include "process.hpp"
+
+#define PROXY_CREDS_V1_INVALID_CA_FILENAME "creds-v1-invalid-ca.zip"
+#define PROXY_CREDS_V1_UNREACHABLE_FILENAME "creds-v1-unreachable.zip"
+#define PROXY_CREDS_V1_NO_CERT_FILENAME "creds-v1-wo-cert.zip"
+#define PROXY_CREDS_V1_NO_CREDS_FILENAME "creds-v1-wo-creds.zip"
+#define PROXY_CREDS_V1_FILENAME "creds-v1.zip"
+
+#ifdef WIN32
+#define PROXY_RUN_SCRIPT "run.ps1"
+#define PROXY_CREDS_BUNDLES "certs\\bundles\\"
+#else
+#define PROXY_RUN_SCRIPT "run.sh"
+#define PROXY_CREDS_BUNDLES "certs/bundles/"
+#endif
+
+using test::Utils;
+using utils::Process;
+
+/**
+ * Database as a service integration tests
+ */
+class DbaasTests : public Integration {
+public:
+ typedef std::map<int, std::string> ServerNames;
+ typedef std::pair<int, std::string> ServerPair;
+
+ static void SetUpTestCase() {
+ char* proxy_path = getenv("PROXY_PATH");
+ if (proxy_path) {
+ proxy_path_ = proxy_path;
+ } else {
+ proxy_path_ = Utils::home_directory() + Utils::PATH_SEPARATOR + "proxy";
+ }
+ proxy_path_ += Utils::PATH_SEPARATOR;
+ proxy_run_script_ = proxy_path_ + PROXY_RUN_SCRIPT;
+
+ // Allow the proxy to start itself or use a currently running proxy
+ if (file_exists(proxy_run_script_)) {
+ if (!start_proxy()) {
+ FAIL() << "Unable to start SNI single endpoint proxy service. Check PROXY_PATH environment "
+ "variable"
+#ifdef WIN32
+ << " or ensure proper ExecutionPolicy is set (e.g. Set-ExecutionPolicy -Scope "
+ "CurrentUser Unrestricted); see "
+ "https://go.microsoft.com/fwlink/?LinkID=135170"
+#endif
+ << ".";
+ }
+ } else {
+ if (!is_proxy_running()) {
+ FAIL()
+ << "SNI single endpoint proxy is not available. Start container before executing test.";
+ }
+ }
+
+ if (!file_exists(proxy_cred_bundles_path_)) {
+ proxy_cred_bundles_path_ = proxy_path_ + proxy_cred_bundles_path_;
+ }
+ if (!file_exists(creds_v1_invalid_ca()) || !file_exists(creds_v1_unreachable()) ||
+ !file_exists(creds_v1_no_cert()) || !file_exists(creds_v1_no_creds()) ||
+ !file_exists(creds_v1())) {
+ FAIL() << "Unable to locate SNI single endpoint credential bundles. Check PROXY_PATH "
+ "environment variable.";
+ }
+ }
+
+ void SetUp() {
+ // Ensure CCM and session are not created for these tests
+ is_ccm_requested_ = false;
+ is_session_requested_ = false;
+ is_schema_metadata_ = true; // Needed for prepared statements
+ Integration::SetUp();
+ }
+
+ static void TearDownTestCase() {
+ if (!Options::keep_clusters()) {
+ stop_proxy();
+ }
+ }
+
+ static std::string creds_v1_invalid_ca() {
+ return proxy_cred_bundles_path_ + PROXY_CREDS_V1_INVALID_CA_FILENAME;
+ }
+
+ static std::string creds_v1_unreachable() {
+ return proxy_cred_bundles_path_ + PROXY_CREDS_V1_UNREACHABLE_FILENAME;
+ }
+
+ static std::string creds_v1_no_cert() {
+ return proxy_cred_bundles_path_ + PROXY_CREDS_V1_NO_CERT_FILENAME;
+ }
+
+ static std::string creds_v1_no_creds() {
+ return proxy_cred_bundles_path_ + PROXY_CREDS_V1_NO_CREDS_FILENAME;
+ }
+
+ static std::string creds_v1() { return proxy_cred_bundles_path_ + PROXY_CREDS_V1_FILENAME; }
+
+ int get_node_id(const std::string& rpc_address) {
+ std::vector<std::string> octects = explode(rpc_address, '.');
+ std::stringstream ss(octects[octects.size() - 1]);
+ int node = 0;
+ if ((ss >> node).fail()) {
+ EXPECT_TRUE(false) << "Unable to parse node number from rpc_address";
+ }
+ return node;
+ }
+
+ /**
+ * Vector of server names sorted by node number (e.g. last octet in real IP address)
+ */
+ ServerNames get_server_names() {
+ ServerNames map;
+ {
+ Cluster cluster = default_cluster(false)
+ .with_randomized_contact_points(false)
+ .with_load_balance_round_robin();
+ EXPECT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1().c_str()));
+ Session session = cluster.connect();
+ for (int i = 0; i < 3; ++i) {
+ Row row = session.execute(SELECT_ALL_SYSTEM_LOCAL_CQL).first_row();
+ int node = get_node_id(row.column_by_name("rpc_address").str());
+ map.insert(ServerPair(node, row.column_by_name("host_id").str()));
+ }
+ }
+ return map;
+ }
+
+ bool start_cluster() {
+ Process::Args args;
+ args.push_back("start");
+ args.push_back("--root");
+ args.push_back("--wait-for-binary-proto");
+ args.push_back("--jvm_arg=-Ddse.product_type=DATASTAX_APOLLO");
+ return ccm_execute(args);
+ }
+
+ bool stop_cluster() {
+ Process::Args args;
+ args.push_back("stop");
+ return ccm_execute(args);
+ }
+
+ bool start_node(int node) {
+ Process::Args args;
+ args.push_back(node_name(node));
+ args.push_back("start");
+ args.push_back("--root");
+ args.push_back("--wait-for-binary-proto");
+ args.push_back("--jvm_arg=-Ddse.product_type=DATASTAX_APOLLO");
+ return ccm_execute(args);
+ }
+
+ bool stop_node(int node) {
+ Process::Args args;
+ args.push_back(node_name(node));
+ args.push_back("stop");
+ return ccm_execute(args);
+ }
+
+private:
+ std::string node_name(int node) {
+ std::stringstream node_name;
+ node_name << "node" << node;
+ return node_name.str();
+ }
+
+ bool ccm_execute(Process::Args args) {
+ Process::Args command;
+ command.push_back("docker");
+ command.push_back("exec");
+ command.push_back(get_proxy_id());
+ command.push_back("ccm");
+ command.insert(command.end(), args.begin(), args.end());
+ Process::Result result = Process::execute(command);
+ return result.exit_status == 0;
+ }
+
+private:
+ static std::string get_proxy_id() {
+ if (proxy_id_.empty()) {
+ Process::Args command;
+ command.push_back("docker");
+ command.push_back("ps");
+ command.push_back("-aqf");
+ command.push_back("ancestor=single_endpoint");
+ Process::Result result = Process::execute(command);
+ proxy_id_ = Utils::trim(result.standard_output);
+ }
+ return proxy_id_;
+ }
+
+ static bool is_proxy_running() { return !get_proxy_id().empty(); }
+
+ static bool start_proxy() {
+ if (is_proxy_running()) return true;
+
+ Process::Args command;
+#ifdef WIN32
+ command.push_back("powershell");
+#endif
+ command.push_back(proxy_run_script_);
+ Process::Result result = Process::execute(command);
+ return result.exit_status == 0;
+ }
+
+ static bool stop_proxy() {
+ Process::Args command;
+ command.push_back("docker");
+ command.push_back("kill");
+ command.push_back(get_proxy_id());
+ Process::Result result = Process::execute(command);
+ return result.exit_status == 0;
+ }
+
+private:
+ static std::string proxy_path_;
+ static std::string proxy_cred_bundles_path_;
+ static std::string proxy_run_script_;
+ static std::string proxy_id_;
+};
+
+std::string DbaasTests::proxy_path_;
+std::string DbaasTests::proxy_cred_bundles_path_ = PROXY_CREDS_BUNDLES;
+std::string DbaasTests::proxy_run_script_ = PROXY_RUN_SCRIPT;
+std::string DbaasTests::proxy_id_;
+
+/**
+ * Perform connection to DBaaS SNI single endpoint docker image.
+ *
+ * This test will perform a connection to a DBaaS SNI single endpoint while ensuring proper
+ * automatic cloud configuration with address resolution.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @since 2.14.0
+ * @expected_result Successful address resolution and connection.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, ResolveAndConnect) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false);
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1().c_str()));
+ connect(cluster);
+}
+
+/**
+ * Perform query using a simple statement against the DBaaS SNI single endpoint docker image.
+ *
+ * This test will perform a connection and execute a simple statement query against the
+ * system.local table to ensure query execution to a DBaaS SNI single endpoint while validating the
+ * results.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @test_category queries
+ * @since 2.14.0
+ * @expected_result Simple statement is executed and nodes are validated.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, QueryEachNode) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false).with_load_balance_round_robin();
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1().c_str()));
+ connect(cluster);
+
+ ServerNames server_names;
+ for (int i = 0; i < 3; ++i) {
+ Result result = session_.execute(SELECT_ALL_SYSTEM_LOCAL_CQL);
+ Uuid expected_host_id = Uuid(result.server_name());
+ Row row = result.first_row();
+
+ Uuid host_id = row.column_by_name("host_id");
+ int node = get_node_id(row.column_by_name("rpc_address").str());
+ EXPECT_NE(0, node);
+ EXPECT_EQ(expected_host_id, host_id);
+ server_names.insert(ServerPair(node, host_id.str()));
+ }
+
+ EXPECT_EQ(3u, server_names.size()); // Ensure all three nodes were queried
+}
+
+/**
+ * Create function and aggregate definitions and ensure the schema metadata is reflected when
+ * execute against the DBaaS SNI single endpoint docker image.
+ *
+ * This test will perform a connection and execute create function/aggregate queries to ensure
+ * schema metadata using a DBaaS SNI single endpoint is handled properly.
+ *
+ * @jira_ticket CPP-815
+ * @test_category dbaas
+ * @test_category queries:schema_metadata:udf
+ * @since 2.14.0
+ * @expected_result Function/Aggregate definitions schema metadata are validated.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, SchemaMetadata) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false);
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1().c_str()));
+ connect(cluster);
+
+ // clang-format off
+ session_.execute("CREATE OR REPLACE FUNCTION avg_state(state tuple<int, bigint>, val int) "
+ "CALLED ON NULL INPUT RETURNS tuple<int, bigint> "
+ "LANGUAGE java AS "
+ "'if (val != null) {"
+ "state.setInt(0, state.getInt(0) + 1);"
+ "state.setLong(1, state.getLong(1) + val.intValue());"
+ "};"
+ "return state;'"
+ ";");
+ session_.execute("CREATE OR REPLACE FUNCTION avg_final (state tuple<int, bigint>) "
+ "CALLED ON NULL INPUT RETURNS double "
+ "LANGUAGE java AS "
+ "'double r = 0;"
+ "if (state.getInt(0) == 0) return null;"
+ "r = state.getLong(1);"
+ "r /= state.getInt(0);"
+ "return Double.valueOf(r);'"
+ ";");
+ session_.execute("CREATE OR REPLACE AGGREGATE average(int) "
+ "SFUNC avg_state STYPE tuple<int, bigint> FINALFUNC avg_final "
+ "INITCOND(0, 0);");
+ // clang-format on
+
+ const CassSchemaMeta* schema_meta = cass_session_get_schema_meta(session_.get());
+ ASSERT_TRUE(schema_meta != NULL);
+ const CassKeyspaceMeta* keyspace_meta =
+ cass_schema_meta_keyspace_by_name(schema_meta, default_keyspace().c_str());
+ ASSERT_TRUE(keyspace_meta != NULL);
+
+ { // Function `avg_state`
+ const char* data = NULL;
+ size_t length = 0;
+ const CassDataType* datatype = NULL;
+
+ const CassFunctionMeta* function_meta =
+ cass_keyspace_meta_function_by_name(keyspace_meta, "avg_state", "tuple<int,bigint>,int");
+ ASSERT_TRUE(function_meta != NULL);
+ cass_function_meta_name(function_meta, &data, &length);
+ EXPECT_EQ("avg_state", std::string(data, length));
+ cass_function_meta_full_name(function_meta, &data, &length);
+ EXPECT_EQ("avg_state(tuple<int,bigint>,int)", std::string(data, length));
+ cass_function_meta_body(function_meta, &data, &length);
+ EXPECT_EQ("if (val != null) {state.setInt(0, state.getInt(0) + 1);state.setLong(1, "
+ "state.getLong(1) + val.intValue());};return state;",
+ std::string(data, length));
+ cass_function_meta_language(function_meta, &data, &length);
+ EXPECT_EQ("java", std::string(data, length));
+ EXPECT_TRUE(cass_function_meta_called_on_null_input(function_meta));
+ ASSERT_EQ(2u, cass_function_meta_argument_count(function_meta));
+ cass_function_meta_argument(function_meta, 0, &data, &length, &datatype);
+ EXPECT_EQ("state", std::string(data, length));
+ EXPECT_EQ(CASS_VALUE_TYPE_TUPLE, cass_data_type_type(datatype));
+ ASSERT_EQ(2u, cass_data_type_sub_type_count(datatype));
+ EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(cass_data_type_sub_data_type(datatype, 0)));
+ EXPECT_EQ(CASS_VALUE_TYPE_BIGINT,
+ cass_data_type_type(cass_data_type_sub_data_type(datatype, 1)));
+ cass_function_meta_argument(function_meta, 1, &data, &length, &datatype);
+ EXPECT_EQ("val", std::string(data, length));
+ EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(datatype));
+ datatype = cass_function_meta_argument_type_by_name(function_meta, "state");
+ EXPECT_EQ(CASS_VALUE_TYPE_TUPLE, cass_data_type_type(datatype));
+ ASSERT_EQ(2u, cass_data_type_sub_type_count(datatype));
+ EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(cass_data_type_sub_data_type(datatype, 0)));
+ EXPECT_EQ(CASS_VALUE_TYPE_BIGINT,
+ cass_data_type_type(cass_data_type_sub_data_type(datatype, 1)));
+ datatype = cass_function_meta_argument_type_by_name(function_meta, "val");
+ EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(datatype));
+ datatype = cass_function_meta_return_type(function_meta);
+ EXPECT_EQ(CASS_VALUE_TYPE_TUPLE, cass_data_type_type(datatype));
+ ASSERT_EQ(2u, cass_data_type_sub_type_count(datatype));
+ EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(cass_data_type_sub_data_type(datatype, 0)));
+ EXPECT_EQ(CASS_VALUE_TYPE_BIGINT,
+ cass_data_type_type(cass_data_type_sub_data_type(datatype, 1)));
+ }
+
+ { // Aggregate `average`
+ const char* data = NULL;
+ size_t length = 0;
+ const CassDataType* datatype = NULL;
+
+ const CassAggregateMeta* aggregate_meta =
+ cass_keyspace_meta_aggregate_by_name(keyspace_meta, "average", "int");
+ ASSERT_TRUE(aggregate_meta != NULL);
+ cass_aggregate_meta_name(aggregate_meta, &data, &length);
+ EXPECT_EQ("average", std::string(data, length));
+ cass_aggregate_meta_full_name(aggregate_meta, &data, &length);
+ EXPECT_EQ("average(int)", std::string(data, length));
+ size_t count = cass_aggregate_meta_argument_count(aggregate_meta);
+ ASSERT_EQ(1u, cass_aggregate_meta_argument_count(aggregate_meta));
+ datatype = cass_aggregate_meta_argument_type(aggregate_meta, 0);
+ EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(datatype));
+ datatype = cass_aggregate_meta_return_type(aggregate_meta);
+ EXPECT_EQ(CASS_VALUE_TYPE_DOUBLE, cass_data_type_type(datatype));
+ datatype = cass_aggregate_meta_state_type(aggregate_meta);
+ EXPECT_EQ(CASS_VALUE_TYPE_TUPLE, cass_data_type_type(datatype));
+ ASSERT_EQ(2u, cass_data_type_sub_type_count(datatype));
+ EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(cass_data_type_sub_data_type(datatype, 0)));
+ EXPECT_EQ(CASS_VALUE_TYPE_BIGINT,
+ cass_data_type_type(cass_data_type_sub_data_type(datatype, 1)));
+ const CassFunctionMeta* function_meta = cass_aggregate_meta_state_func(aggregate_meta);
+ cass_function_meta_name(function_meta, &data, &length);
+ EXPECT_EQ("avg_state", std::string(data, length));
+ function_meta = cass_aggregate_meta_final_func(aggregate_meta);
+ cass_function_meta_name(function_meta, &data, &length);
+ EXPECT_EQ("avg_final", std::string(data, length));
+ const CassValue* initcond = cass_aggregate_meta_init_cond(aggregate_meta);
+ EXPECT_EQ(CASS_VALUE_TYPE_VARCHAR, cass_value_type(initcond));
+ EXPECT_EQ(Text("(0, 0)"), Text(initcond));
+ ASSERT_TRUE(true);
+ }
+
+ cass_schema_meta_free(schema_meta);
+}
+
+/**
+ * Ensure guardrails are enabled when performing a query against the DBaaS SNI single endpoint
+ * docker image.
+ *
+ * This test will perform a connection and execute a simple insert statement query against the
+ * DBaaS SNI single endpoint server using a valid consistency level while validating the
+ * insert occurred.
+ *
+ * @jira_ticket CPP-813
+ * @test_category dbaas
+ * @test_category queries:guard_rails
+ * @since 2.14.0
+ * @expected_result Simple statement is executed and is validated.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, ConsistencyGuardrails) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false);
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1().c_str()));
+ connect(cluster);
+
+ session_.execute(
+ format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, default_table().c_str(), "int", "int"));
+ CHECK_FAILURE;
+
+ session_.execute(Statement(
+ format_string(CASSANDRA_KEY_VALUE_INSERT_FORMAT, default_table().c_str(), "0", "1")));
+ Result result = session_.execute(
+ Statement(format_string(CASSANDRA_SELECT_VALUE_FORMAT, default_table().c_str(), "0")));
+ EXPECT_EQ(1u, result.row_count());
+ ASSERT_EQ(1u, result.column_count());
+ ASSERT_EQ(Integer(1), result.first_row().next().as<Integer>());
+}
+
+/**
+ * Ensure guardrails are enabled when performing a query against the DBaaS SNI single endpoint
+ * docker image.
+ *
+ * This test will perform a connection and execute a simple statement query against the
+ * DBaaS SNI single endpoint server using an invalid consistency level while validating the
+ * error.
+ *
+ * @jira_ticket CPP-813
+ * @test_category dbaas
+ * @test_category queries:guard_rails
+ * @since 2.14.0
+ * @expected_result Simple statement is executed and guard rail error is validated.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, ConsistencyGuardrailsInvalid) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false);
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1().c_str()));
+ connect(cluster);
+
+ session_.execute(
+ format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, default_table().c_str(), "int", "int"));
+ CHECK_FAILURE;
+
+ Statement statement(
+ format_string(CASSANDRA_KEY_VALUE_INSERT_FORMAT, default_table().c_str(), "0", "1"));
+ statement.set_consistency(
+ CASS_CONSISTENCY_LOCAL_ONE); // Override default DBaaS configured consistency
+ Result result = session_.execute(statement, false);
+ EXPECT_TRUE(result.error_code() != CASS_OK)
+ << "Statement execution succeeded; guardrails may not be enabled";
+ EXPECT_TRUE(contains(result.error_message(),
+ "Provided value LOCAL_ONE is not allowed for Write Consistency Level"));
+}
+
+/**
+ * Perform query ensuring token aware is enabled by default.
+ *
+ * This test will perform a connection and execute an insert query to ensure that token
+ * aware routing is enabled by default when automatically configured.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @test_category queries
+ * @since 2.14.0
+ * @expected_result Simple statement is executed and validated against replicas.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, DcAwareTokenAwareRoutingDefault) {
+ CHECK_FAILURE;
+
+ ServerNames server_names = get_server_names();
+
+ // Validate replicas are used during token aware routing
+ std::vector<std::pair<int, int> > replicas;
+ replicas.push_back(std::pair<int, int>(0, 2)); // query key, node id (last octet of rpc_address)
+ replicas.push_back(std::pair<int, int>(1, 2));
+ replicas.push_back(std::pair<int, int>(2, 2));
+ replicas.push_back(std::pair<int, int>(3, 1));
+ replicas.push_back(std::pair<int, int>(4, 3));
+ replicas.push_back(std::pair<int, int>(5, 2));
+
+ Cluster cluster = default_cluster(false);
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1().c_str()));
+ connect(cluster);
+
+ for (std::vector<std::pair<int, int> >::iterator it = replicas.begin(), end = replicas.end();
+ it != end; ++it) {
+ Statement statement(SELECT_ALL_SYSTEM_LOCAL_CQL, 1);
+ statement.set_consistency(CASS_CONSISTENCY_ONE);
+ statement.add_key_index(0);
+ statement.set_keyspace("system");
+ statement.bind(0, Integer(it->first));
+
+ Result result = session_.execute(
+ statement, false); // No bind variables exist so statement will return error
+ EXPECT_EQ(server_names[it->second], result.server_name());
+ }
+}
+
+/**
+ * Attempt connection to DBaaS SNI single endpoint docker image manually setting auth.
+ *
+ * This test will perform a connection to a DBaaS SNI single endpoint while ensuring proper
+ * automatic cloud configuration with address resolution where the authentication is not available.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas:auth
+ * @since 2.14.0
+ * @expected_result Successful address resolution and connection.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, ResolveAndConnectWithoutCredsInBundle) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false);
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1_no_creds().c_str()));
+ cluster.with_credentials("cassandra", "cassandra");
+ connect(cluster);
+}
+
+/**
+ * Attempt connection to DBaaS SNI single endpoint docker image leaving auth unset.
+ *
+ * This test will perform a connection to a DBaaS SNI single endpoint while ensuring proper
+ * automatic cloud configuration with address resolution where the authentication is not set.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @since 2.14.0
+ * @expected_result Failed to establish a connection.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, InvalidWithoutCreds) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false);
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1_no_creds().c_str()));
+ try {
+ connect(cluster);
+ EXPECT_TRUE(false) << "Connection established";
+ } catch (Session::Exception& se) {
+ EXPECT_EQ(CASS_ERROR_SERVER_BAD_CREDENTIALS, se.error_code());
+ }
+}
+
+/**
+ * Attempt connection to DBaaS SNI single endpoint docker image using invalid metadata server.
+ *
+ * This test will attempt a connection to a DBaaS SNI single endpoint using an invalid metadata
+ * server. The connection should not succeed as no resolution will be possible.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @since 2.14.0
+ * @expected_result Failed to establish a connection.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, InvalidMetadataServer) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false);
+ EXPECT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1_unreachable().c_str()));
+ try {
+ connect(cluster);
+ EXPECT_TRUE(false) << "Connection established";
+ } catch (Session::Exception& se) {
+ EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, se.error_code());
+ }
+}
+
+/**
+ * Attempt connection to DBaaS SNI single endpoint docker image using invalid certificate.
+ *
+ * This test will attempt a connection to a DBaaS SNI single endpoint using an invalid certificate.
+ * The connection should not succeed as no resolution will be possible.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @since 2.14.0
+ * @expected_result Failed to establish a connection.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, InvalidCertificate) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false);
+ EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS,
+ cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1_no_cert().c_str()));
+ try {
+ connect(cluster);
+ EXPECT_TRUE(false) << "Connection established";
+ } catch (Session::Exception& se) {
+ EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, se.error_code());
+ }
+}
+
+/**
+ * Attempt connection to DBaaS SNI single endpoint docker image using invalid CA.
+ *
+ * This test will attempt a connection to a DBaaS SNI single endpoint using an invalid CA. The
+ * connection should not succeed as no resolution will be possible.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @since 2.14.0
+ * @expected_result Failed to establish a connection.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, InvalidCertificateAuthority) {
+ CHECK_FAILURE;
+
+ Cluster cluster = default_cluster(false);
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1_invalid_ca().c_str()));
+ try {
+ connect(cluster);
+ EXPECT_TRUE(false) << "Connection established";
+ } catch (Session::Exception& se) {
+ EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, se.error_code());
+ }
+}
+
+/**
+ * Perform query with nodes down against the DBaaS SNI single endpoint docker image.
+ *
+ * This test will perform a connection and execute a simple statement query against the
+ * system.local table to ensure query execution to a DBaaS SNI single endpoint while validating the
+ * results.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @test_category queries
+ * @since 2.14.0
+ * @expected_result Simple statement is executed and validated while node(s) are down.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, QueryWithNodesDown) {
+ CHECK_FAILURE;
+
+ ServerNames server_names = get_server_names();
+
+ Cluster cluster = default_cluster(false);
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1().c_str()));
+ connect(cluster);
+
+ EXPECT_TRUE(stop_node(1));
+ for (int i = 0; i < 8; ++i) {
+ EXPECT_NE(server_names[1], session_.execute(SELECT_ALL_SYSTEM_LOCAL_CQL).server_name());
+ }
+
+ EXPECT_TRUE(stop_node(3));
+ for (int i = 0; i < 8; ++i) {
+ EXPECT_EQ(server_names[2], session_.execute(SELECT_ALL_SYSTEM_LOCAL_CQL).server_name());
+ }
+
+ EXPECT_TRUE(start_cluster());
+}
+
+/**
+ * Ensure reconnection occurs during full outage.
+ *
+ * This test will perform a connection, full outage will occur and the cluster will be restarted
+ * while executing a simple statement query against the system.local table to ensure reconnection
+ * after full outage.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @test_category queries
+ * @since 2.14.0
+ * @expected_result Simple statement is executed and validated after full outage.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, FullOutage) {
+ CHECK_FAILURE;
+
+ ServerNames server_names = get_server_names();
+
+ Cluster cluster = default_cluster(false).with_constant_reconnect(10); // Quick reconnect
+ ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster.get(), creds_v1().c_str()));
+ connect(cluster);
+
+ EXPECT_TRUE(stop_cluster());
+
+ Statement statement(SELECT_ALL_SYSTEM_LOCAL_CQL);
+ EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, session_.execute(statement, false).error_code());
+
+ EXPECT_TRUE(start_cluster());
+ EXPECT_EQ(CASS_OK, session_.execute(statement).error_code());
+}
diff --git a/gtests/src/integration/tests/test_exec_profile.cpp b/gtests/src/integration/tests/test_exec_profile.cpp
index ec565f2c8..8f23b1ae9 100644
--- a/gtests/src/integration/tests/test_exec_profile.cpp
+++ b/gtests/src/integration/tests/test_exec_profile.cpp
@@ -302,8 +302,15 @@ CASSANDRA_INTEGRATION_TEST_F(ExecutionProfileTest, Consistency) {
batch.set_execution_profile("consistency");
result = session_.execute(batch, false);
ASSERT_EQ(CASS_ERROR_SERVER_INVALID_QUERY, result.error_code());
- ASSERT_TRUE(contains(result.error_message(),
- "SERIAL is not supported as conditional update commit consistency"));
+ CCM::CassVersion cass_version = server_version_;
+ if (!Options::is_cassandra()) {
 cass_version = static_cast<CCM::DseVersion>(cass_version).get_cass_version();
+ }
+ std::string expected_message = "SERIAL is not supported as conditional update commit consistency";
+ if (cass_version >= "4.0.0") {
+ expected_message = "You must use conditional updates for serializable writes";
+ }
+ ASSERT_TRUE(contains(result.error_message(), expected_message));
// Execute a simple query with assigned profile (should fail)
insert_.set_execution_profile("consistency");
diff --git a/gtests/src/integration/tests/test_null_string_params.cpp b/gtests/src/integration/tests/test_null_string_params.cpp
index 7a618b376..b44564376 100644
--- a/gtests/src/integration/tests/test_null_string_params.cpp
+++ b/gtests/src/integration/tests/test_null_string_params.cpp
@@ -96,7 +96,7 @@ class SchemaNullStringApiArgsTest : public NullStringApiArgsTest {
if (server_version_ >= "3.0.0") {
session_.execute(format_string("CREATE MATERIALIZED VIEW %s "
- "AS SELECT value "
+ "AS SELECT value, key "
" FROM %s"
" WHERE value IS NOT NULL and key IS NOT NULL "
"PRIMARY KEY(value, key)",
@@ -319,9 +319,9 @@ CASSANDRA_INTEGRATION_TEST_F(SchemaNullStringApiArgsTest, MaterializedViewMetaFu
*/
CASSANDRA_INTEGRATION_TEST_F(SchemaNullStringApiArgsTest, FunctionAndAggregateMetaFunctions) {
CHECK_VERSION(2.2.0);
- // C* 3.x and later annotate collection columns as frozen.
+ // C* 3.x annotate collection columns as frozen.
const CassFunctionMeta* function_meta =
- (schema_meta_.version().major_version >= 3)
+ (schema_meta_.version().major_version == 3)
? cass_keyspace_meta_function_by_name(keyspace_meta_.get(), "avg_final",
 "frozen<tuple<int,bigint>>")
: cass_keyspace_meta_function_by_name(keyspace_meta_.get(), "avg_final",
diff --git a/gtests/src/integration/tests/test_prepared.cpp b/gtests/src/integration/tests/test_prepared.cpp
new file mode 100644
index 000000000..4dd52158d
--- /dev/null
+++ b/gtests/src/integration/tests/test_prepared.cpp
@@ -0,0 +1,110 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "integration.hpp"
+
+/**
+ * Prepared integration tests; common operations
+ */
+class PreparedTests : public Integration {
+ void SetUp() {
+ is_keyspace_change_requested_ = false;
+ Integration::SetUp();
+ }
+};
+
+/**
+ * Execute a statement that forces a re-prepare resulting in a new prepared ID that fails fast and
+ * returns an error.
+ *
+ * This test will create a new table, prepare a statement using a fully qualified query, update the
+ * default keyspace, then drop and re-create the table to force the server to invalidate the
+ * prepared ID. After the table is dropped the prepared statement will be used to execute an insert
+ * query that will result in an error being returned when re-using the original prepared statement.
+ *
+ * @see: https://issues.apache.org/jira/browse/CASSANDRA-15252 (Server version restriction may need
+ * to be added if/when Apache Cassandra issue is addressed.)
+ *
+ * @test_category error
+ * @test_category queries:prepared
+ * @since core:2.14.0
+ * @expected_result Re-prepare will fail fast and return error.
+ */
+CASSANDRA_INTEGRATION_TEST_F(PreparedTests, FailFastWhenPreparedIDChangesDuringReprepare) {
+ CHECK_FAILURE;
+
+ // Create the table and initial prepared statement
+ session_.execute(format_string(CASSANDRA_KEY_VALUE_QUALIFIED_TABLE_FORMAT, keyspace_name_.c_str(),
+ table_name_.c_str(), "int", "int"));
+ Prepared insert_prepared =
+ session_.prepare(format_string(CASSANDRA_KEY_VALUE_QUALIFIED_INSERT_FORMAT,
+ keyspace_name_.c_str(), table_name_.c_str(), "?", "?"));
+
+ // Update the current keyspace for the session
+ ASSERT_TRUE(use_keyspace(keyspace_name_));
+
+ // Drop and re-create the table to invalidate the prepared statement on the server
+ drop_table(table_name_);
+ session_.execute(format_string(CASSANDRA_KEY_VALUE_QUALIFIED_TABLE_FORMAT, keyspace_name_.c_str(),
+ table_name_.c_str(), "int", "int"));
+
+ // Execute the insert statement and validate the error code
+ logger_.add_critera("ID mismatch while trying to prepare query");
+ Statement insert_statement = insert_prepared.bind();
+ insert_statement.bind(0, Integer(0));
+ insert_statement.bind(1, Integer(1));
+ Result result = session_.execute(insert_statement, false);
+ EXPECT_TRUE(contains(result.error_message(), "ID mismatch while trying to prepare query"));
+}
+
+/**
+ * Execute a statement that forces a re-prepare resulting in a same prepared ID.
+ *
+ * This test will connect to a cluster and use a keyspace, prepare a statement using a unqualified
+ * query, then drop and re-create the table to force the server to invalidate the
+ * prepared ID. After the table is dropped the prepared statement will be used to execute an insert
+ * query that will result the statement being re-prepared and the insert statement succeeding.
+ *
+ * @test_category queries:prepared
+ * @since core:1.0.0
+ * @expected_result Re-prepare will correctly execute the insert statement.
+ */
+CASSANDRA_INTEGRATION_TEST_F(PreparedTests, PreparedIDUnchangedDuringReprepare) {
+ CHECK_FAILURE;
+
+ // Allow for unqualified queries
+ use_keyspace(keyspace_name_);
+
+ // Create the table and initial prepared statement
+ session_.execute(
+ format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, table_name_.c_str(), "int", "int"));
+ Prepared insert_prepared = session_.prepare(
+ format_string(CASSANDRA_KEY_VALUE_INSERT_FORMAT, table_name_.c_str(), "?", "?"));
+
+ // Drop and re-create the table to invalidate the prepared statement on the server
+ drop_table(table_name_);
+ session_.execute(
+ format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, table_name_.c_str(), "int", "int"));
+
+ // Execute the insert statement and validate success
+ logger_.add_critera("Prepared query with ID");
+ Statement insert_statement = insert_prepared.bind();
+ insert_statement.bind(0, Integer(0));
+ insert_statement.bind(1, Integer(1));
+ Result result = session_.execute(insert_statement, false);
+ EXPECT_EQ(CASS_OK, result.error_code());
+ EXPECT_EQ(1u, logger_.count());
+}
diff --git a/gtests/src/integration/tests/test_schema_metadata.cpp b/gtests/src/integration/tests/test_schema_metadata.cpp
index 8adbc07f7..29c74ecf3 100644
--- a/gtests/src/integration/tests/test_schema_metadata.cpp
+++ b/gtests/src/integration/tests/test_schema_metadata.cpp
@@ -61,7 +61,7 @@ class SchemaMetadataTest : public Integration {
if (server_version_ >= "3.0.0") {
session_.execute(format_string("CREATE MATERIALIZED VIEW %s "
- "AS SELECT value "
+ "AS SELECT value, key "
" FROM %s"
" WHERE value IS NOT NULL and key IS NOT NULL "
"PRIMARY KEY(value, key)",
diff --git a/gtests/src/integration/tests/test_session.cpp b/gtests/src/integration/tests/test_session.cpp
index 24234009c..3c01f2f6c 100644
--- a/gtests/src/integration/tests/test_session.cpp
+++ b/gtests/src/integration/tests/test_session.cpp
@@ -140,7 +140,7 @@ CASSANDRA_INTEGRATION_TEST_F(SessionTest, ExternalHostListener) {
// Restart node 1 (up event)
ccm_->start_node(1);
CCM::CassVersion cass_version = this->server_version_;
- if (Options::is_dse()) {
+ if (!Options::is_cassandra()) {
cass_version = static_cast(cass_version).get_cass_version();
}
if (cass_version >= "2.2") {
diff --git a/gtests/src/integration/tests/test_startup_options.cpp b/gtests/src/integration/tests/test_startup_options.cpp
index 087252e98..77d759038 100644
--- a/gtests/src/integration/tests/test_startup_options.cpp
+++ b/gtests/src/integration/tests/test_startup_options.cpp
@@ -19,7 +19,7 @@
/**
* Startup options integration tests
*/
-class StartupOptionssTests : public Integration {};
+class StartupOptionsTests : public Integration {};
/**
* Verify driver name and version are assigned in startup options.
@@ -34,10 +34,10 @@ class StartupOptionssTests : public Integration {};
* @cassandra_version 4.0.0
* @expected_result Driver startup options are validated.
*/
-CASSANDRA_INTEGRATION_TEST_F(StartupOptionssTests, DriverOptions) {
+CASSANDRA_INTEGRATION_TEST_F(StartupOptionsTests, DriverOptions) {
CHECK_FAILURE;
CHECK_VERSION(4.0.0);
- if (Options::is_dse()) {
+ if (!Options::is_cassandra()) {
SKIP_TEST("Unsupported for DataStax Enterprise Version "
<< server_version_.to_string() << ": 'system_views.clients' is unavailable");
}
diff --git a/gtests/src/integration/tests/test_statement.cpp b/gtests/src/integration/tests/test_statement.cpp
index 20f1930ef..e51d5dd90 100644
--- a/gtests/src/integration/tests/test_statement.cpp
+++ b/gtests/src/integration/tests/test_statement.cpp
@@ -104,6 +104,11 @@ CASSANDRA_INTEGRATION_TEST_F(StatementTests, SetHostWhereHostIsDown) {
EXPECT_EQ(result.error_code(), CASS_ERROR_LIB_NO_HOSTS_AVAILABLE);
}
+class StatementNoClusterTests : public StatementTests {
+public:
+ StatementNoClusterTests() { is_ccm_requested_ = false; }
+};
+
/**
* Set a host on a statement using valid host strings.
*
@@ -111,7 +116,7 @@ CASSANDRA_INTEGRATION_TEST_F(StatementTests, SetHostWhereHostIsDown) {
* @test_category configuration
* @expected_result Success
*/
-TEST(StatementTest, SetHostWithValidHostString) {
+CASSANDRA_INTEGRATION_TEST_F(StatementNoClusterTests, SetHostWithValidHostString) {
Statement statement("");
EXPECT_EQ(cass_statement_set_host(statement.get(), "127.0.0.1", 9042), CASS_OK);
EXPECT_EQ(cass_statement_set_host(statement.get(), "::1", 9042), CASS_OK);
@@ -127,7 +132,7 @@ TEST(StatementTest, SetHostWithValidHostString) {
* @test_category configuration
* @expected_result Failure with the bad parameters error.
*/
-TEST(StatementTest, SetHostWithInvalidHostString) {
+CASSANDRA_INTEGRATION_TEST_F(StatementNoClusterTests, SetHostWithInvalidHostString) {
Statement statement("");
EXPECT_EQ(cass_statement_set_host(statement.get(), "notvalid", 9042), CASS_ERROR_LIB_BAD_PARAMS);
EXPECT_EQ(cass_statement_set_host(statement.get(), "", 9042), CASS_ERROR_LIB_BAD_PARAMS);
@@ -141,7 +146,7 @@ TEST(StatementTest, SetHostWithInvalidHostString) {
* @test_category configuration
* @expected_result Success
*/
-TEST(StatementTest, SetHostWithValidHostInet) {
+CASSANDRA_INTEGRATION_TEST_F(StatementNoClusterTests, SetHostWithValidHostInet) {
Statement statement("");
CassInet valid;
ASSERT_EQ(cass_inet_from_string("127.0.0.1", &valid), CASS_OK);
@@ -162,7 +167,7 @@ TEST(StatementTest, SetHostWithValidHostInet) {
* @test_category configuration
* @expected_result Failure with the bad parameters error.
*/
-TEST(StatementTest, SetHostWithInvalidHostInet) {
+CASSANDRA_INTEGRATION_TEST_F(StatementNoClusterTests, SetHostWithInvalidHostInet) {
Statement statement("");
CassInet invalid;
invalid.address_length = 3; // Only 4 or 16 is valid (IPv4 and IPv6)
diff --git a/gtests/src/unit/http_server.cpp b/gtests/src/unit/http_server.cpp
new file mode 100644
index 000000000..aa93df486
--- /dev/null
+++ b/gtests/src/unit/http_server.cpp
@@ -0,0 +1,121 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "http_server.hpp"
+
+using datastax::String;
+using datastax::internal::Memory;
+using datastax::internal::OStringStream;
+using datastax::internal::ScopedMutex;
+using datastax::internal::core::Address;
+using datastax::internal::core::EventLoop;
+using datastax::internal::core::Task;
+
+String response(int status, const String& body = "", const String& content_type = "") {
+ OStringStream ss;
+ ss << "HTTP/1.0 " << status << " " << http_status_str(static_cast<enum http_status>(status)) << "\r\n";
+ if (!body.empty()) {
+ ss << "Content-Type: ";
+ if (content_type.empty()) {
+ ss << "text/plain";
+ } else {
+ ss << content_type;
+ }
+ ss << "\r\nContent-Length: " << body.size() << "\r\n\r\n" << body;
+ } else {
+ ss << "\r\n";
+ }
+
+ return ss.str();
+}
+
+using namespace mockssandra;
+using namespace mockssandra::http;
+
+void Server::listen() {
+ server_connection_->listen(&event_loop_group_);
+ server_connection_->wait_listen();
+}
+
+void Server::close() {
+ if (server_connection_) {
+ server_connection_->close();
+ server_connection_->wait_close();
+ }
+}
+
+bool Server::use_ssl(const String& key, const String& cert, const String& ca_cert /*= ""*/,
+ bool require_client_cert /*= false*/) {
+ return server_connection_->use_ssl(key, cert, ca_cert, require_client_cert);
+}
+
+Server::ClientConnection::ClientConnection(internal::ServerConnection* server_connection,
+ Server* server)
+ : internal::ClientConnection(server_connection)
+ , path_(server->path())
+ , content_type_(server->content_type())
+ , response_body_(server->response_body())
+ , response_status_code_(server->response_status_code())
+ , enable_valid_response_(server->enable_valid_response())
+ , close_connnection_after_request_(server->close_connnection_after_request()) {
+ http_parser_init(&parser_, HTTP_REQUEST);
+ http_parser_settings_init(&parser_settings_);
+
+ parser_.data = this;
+ parser_settings_.on_url = on_url;
+}
+
+void Server::ClientConnection::on_read(const char* data, size_t len) {
+ request_ = String(data, len);
+ size_t parsed = http_parser_execute(&parser_, &parser_settings_, data, len);
+ if (parsed < static_cast<size_t>(len)) {
+ enum http_errno err = HTTP_PARSER_ERRNO(&parser_);
+ fprintf(stderr, "%s: %s\n", http_errno_name(err), http_errno_description(err));
+ close();
+ }
+}
+
+int Server::ClientConnection::on_url(http_parser* parser, const char* buf, size_t len) {
+ ClientConnection* self = static_cast<ClientConnection*>(parser->data);
+ self->handle_url(buf, len);
+ return 0;
+}
+
+void Server::ClientConnection::handle_url(const char* buf, size_t len) {
+ String path(buf, len);
+ if (path.substr(0, path.find("?")) == path_) { // Compare without query parameters
+ if (enable_valid_response_) {
+ if (response_body_.empty()) {
+ write(response(response_status_code_, request_)); // Echo response
+ } else {
+ write(response(response_status_code_, response_body_, content_type_));
+ }
+ } else {
+ write("Invalid HTTP server response");
+ }
+ } else {
+ write(response(404));
+ }
+ // From the HTTP/1.0 protocol specification:
+ //
+ // > When an Entity-Body is included with a message, the length of that body may be determined in
+ // > one of two ways. If a Content-Length header field is present, its value in bytes represents
+ // > the length of the Entity-Body. Otherwise, the body length is determined by the closing of the
+ // > connection by the server.
+ if (close_connnection_after_request_) {
+ close();
+ }
+}
diff --git a/gtests/src/unit/http_server.hpp b/gtests/src/unit/http_server.hpp
new file mode 100644
index 000000000..ed5f352d0
--- /dev/null
+++ b/gtests/src/unit/http_server.hpp
@@ -0,0 +1,122 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef HTTP_MOCK_SERVER_HPP
+#define HTTP_MOCK_SERVER_HPP
+
+#define HTTP_MOCK_HOSTNAME "cpp-driver.hostname."
+#define HTTP_MOCK_SERVER_IP "127.254.254.254"
+#define HTTP_MOCK_SERVER_PORT 30443
+
+#include "http_parser.h"
+#include "mockssandra.hpp"
+#include "string.hpp"
+
+namespace mockssandra { namespace http {
+
+/**
+ * Mockssandra HTTP server.
+ *
+ * If no response body is set then the default response will be the original request; e.g. echo HTTP
+ * server.
+ */
+class Server {
+public:
+ Server()
+ : path_("/")
+ , content_type_("text/plain")
+ , response_status_code_(200)
+ , enable_valid_response_(true)
+ , close_connnection_after_request_(true)
+ , event_loop_group_(1, "HTTP Server")
+ , factory_(this)
+ , server_connection_(new internal::ServerConnection(
+ Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), factory_)) {}
+
+ const String& path() const { return path_; }
+ const String& content_type() const { return content_type_; }
+ const String& response_body() const { return response_body_; }
+ int response_status_code() const { return response_status_code_; }
+ bool enable_valid_response() { return enable_valid_response_; }
+ bool close_connnection_after_request() { return close_connnection_after_request_; }
+
+ void set_path(const String& path) { path_ = path; }
+ void set_content_type(const String& content_type) { content_type_ = content_type; }
+ void set_response_body(const String& response_body) { response_body_ = response_body; }
+ void set_response_status_code(int status_code) { response_status_code_ = status_code; }
+ void enable_valid_response(bool enable) { enable_valid_response_ = enable; }
+ void set_close_connnection_after_request(bool enable) {
+ close_connnection_after_request_ = enable;
+ }
+
+ bool use_ssl(const String& key, const String& cert, const String& ca_cert = "",
+ bool require_client_cert = false);
+
+ void listen();
+ void close();
+
+private:
+ class ClientConnection : public internal::ClientConnection {
+ public:
+ ClientConnection(internal::ServerConnection* server_connection, Server* server);
+
+ virtual void on_read(const char* data, size_t len);
+
+ private:
+ static int on_url(http_parser* parser, const char* buf, size_t len);
+ void handle_url(const char* buf, size_t len);
+
+ private:
+ String path_;
+ String content_type_;
+ String response_body_;
+ int response_status_code_;
+ bool enable_valid_response_;
+ bool close_connnection_after_request_;
+ String request_;
+ http_parser parser_;
+ http_parser_settings parser_settings_;
+ };
+
+ class ClientConnectionFactory : public internal::ClientConnectionFactory {
+ public:
+ ClientConnectionFactory(Server* server)
+ : server_(server) {}
+
+ virtual internal::ClientConnection*
+ create(internal::ServerConnection* server_connection) const {
+ return new ClientConnection(server_connection, server_);
+ }
+
+ private:
+ Server* const server_;
+ };
+
+private:
+ String path_;
+ String content_type_;
+ String response_body_;
+ int response_status_code_;
+ bool enable_valid_response_;
+ bool close_connnection_after_request_;
+ SimpleEventLoopGroup event_loop_group_;
+ ClientConnectionFactory factory_;
+ internal::ServerConnection::Ptr server_connection_;
+};
+
+}} // namespace mockssandra::http
+
+#endif
diff --git a/gtests/src/unit/http_test.cpp b/gtests/src/unit/http_test.cpp
new file mode 100644
index 000000000..5ceea413f
--- /dev/null
+++ b/gtests/src/unit/http_test.cpp
@@ -0,0 +1,61 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "http_test.hpp"
+
+using namespace datastax;
+using namespace datastax::internal::core;
+
+SocketSettings HttpTest::use_ssl(const String& cn, bool is_server_using_ssl /*= true*/) {
+ SocketSettings settings;
+
+#ifdef HAVE_OPENSSL
+ String ca_key = mockssandra::Ssl::generate_key();
+ ca_cert_ = mockssandra::Ssl::generate_cert(ca_key, "CA");
+
+ key_ = mockssandra::Ssl::generate_key();
+ cert_ = mockssandra::Ssl::generate_cert(key_, cn, ca_cert_, ca_key);
+
+ String client_key = mockssandra::Ssl::generate_key();
+ String client_cert = mockssandra::Ssl::generate_cert(client_key, cn, ca_cert_, ca_key);
+
+ SslContext::Ptr ssl_context(SslContextFactory::create());
+
+ ssl_context->set_cert(client_cert.c_str(), client_cert.size());
+ ssl_context->set_private_key(client_key.c_str(), client_key.size(), "",
+ 0); // No password expected for the private key
+
+ ssl_context->add_trusted_cert(ca_cert_.c_str(), ca_cert_.size());
+
+ settings.ssl_context = ssl_context;
+
+ if (is_server_using_ssl) {
+ server_.use_ssl(key_, cert_, ca_cert_, true);
+ }
+#endif
+
+ return settings;
+}
+
+void HttpTest::use_ssl(const String& ca_cert, const String& ca_key, const String& cn) {
+#ifdef HAVE_OPENSSL
+ key_ = mockssandra::Ssl::generate_key();
+ cert_ = mockssandra::Ssl::generate_cert(key_, cn, ca_cert, ca_key);
+ ca_cert_ = ca_cert;
+
+ server_.use_ssl(key_, cert_, ca_cert_, true);
+#endif
+}
diff --git a/gtests/src/unit/http_test.hpp b/gtests/src/unit/http_test.hpp
new file mode 100644
index 000000000..f4e9d8e7e
--- /dev/null
+++ b/gtests/src/unit/http_test.hpp
@@ -0,0 +1,66 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef HTTP_SERVER_TEST_HPP
+#define HTTP_SERVER_TEST_HPP
+
+#include "http_server.hpp"
+#include "loop_test.hpp"
+#include "socket_connector.hpp"
+
+class HttpTest : public LoopTest {
+public:
+ ~HttpTest() { server_.close(); }
+
+ const datastax::String& ca_cert() const { return ca_cert_; }
+ const datastax::String& cert() const { return cert_; }
+ const datastax::String& key() const { return key_; }
+
+ void set_path(const datastax::String& path) { server_.set_path(path); }
+
+ void set_content_type(const datastax::String& content_type) {
+ server_.set_content_type(content_type);
+ }
+
+ void set_response_body(const datastax::String& response_body) {
+ server_.set_response_body(response_body);
+ }
+
+ void set_response_status_code(int status_code) { server_.set_response_status_code(status_code); }
+
+ void enable_valid_response(bool enable) { server_.enable_valid_response(enable); }
+
+ void set_close_connnection_after_request(bool enable) {
+ server_.set_close_connnection_after_request(enable);
+ }
+
+ void start_http_server() { server_.listen(); }
+ void stop_http_server() { server_.close(); }
+
+ datastax::internal::core::SocketSettings use_ssl(const String& cn = HTTP_MOCK_HOSTNAME,
+ bool is_server_using_ssl = true);
+
+ void use_ssl(const String& ca_cert, const String& ca_key, const String& cn);
+
+private:
+ datastax::String ca_cert_;
+ datastax::String cert_;
+ datastax::String key_;
+
+ mockssandra::http::Server server_;
+};
+
+#endif
diff --git a/gtests/src/unit/mockssandra.cpp b/gtests/src/unit/mockssandra.cpp
index 2620cece0..f174f1481 100644
--- a/gtests/src/unit/mockssandra.cpp
+++ b/gtests/src/unit/mockssandra.cpp
@@ -25,11 +25,16 @@
#include "tracing_data_handler.hpp" // For tracing query
#include "uuids.hpp"
+#include <openssl/dh.h>
+#include <openssl/pem.h>
+#include <openssl/x509v3.h>
+
#ifdef WIN32
#include "winsock.h"
#endif
using datastax::internal::bind_callback;
+using datastax::internal::Map;
using datastax::internal::Memory;
using datastax::internal::OStringStream;
using datastax::internal::ScopedMutex;
@@ -40,8 +45,48 @@ using datastax::internal::core::UuidGen;
#define DSE_VERSION "6.7.1"
#define DSE_CASSANDRA_VERSION "4.0.0.671"
+#if defined(OPENSSL_VERSION_NUMBER) && \
+ !defined(LIBRESSL_VERSION_NUMBER) // Required as OPENSSL_VERSION_NUMBER for LibreSSL is defined
+ // as 2.0.0
+#if (OPENSSL_VERSION_NUMBER >= 0x10100000L)
+#define SSL_SERVER_METHOD TLS_server_method
+#else
+#define SSL_SERVER_METHOD SSLv23_server_method
+#endif
+#else
+#if (LIBRESSL_VERSION_NUMBER >= 0x20302000L)
+#define SSL_SERVER_METHOD TLS_server_method
+#else
+#define SSL_SERVER_METHOD SSLv23_server_method
+#endif
+#endif
+
namespace mockssandra {
+static DH* dh_parameters() {
+ // Generated using the following command: `openssl dhparam -C 2048`
+ // Prime length of 2048 chosen to bypass client-side error:
+ // `SSL3_CHECK_CERT_AND_ALGORITHM:dh key too small`
+
+ // Note: This is not generated, programmatically, using something like the following:
+ // `DH_generate_parameters_ex(dh, 2048, DH_GENERATOR_5, NULL)`
+ // because DH prime generation takes a *REALLY* long time.
+ static const char* dh_parameters_pem =
+ "-----BEGIN DH PARAMETERS-----\n"
+ "MIIBCAKCAQEAusYypYO7u8mHelHjpDuUy7hjBgPw/KS03iSRnP5SNMB6OxVFslXv\n"
+ "s6McqEf218Fqpzi18tWA7fq3fvlT+Nx1Tda+Za5C8o5niRYxHks5N+RfnnrFf7vn\n"
+ "0lxrzsXP6es08Ts/UGMsp1nEaCSd/gjDglPgjdC1V/KmBsbT+8IwpbzPPdir0/jA\n"
+ "r+DXssZRZl7JtymGHXPkXTSBhsqSHamfzGRnAQFWToKAinqAdhY7pN/8krwvRj04\n"
+ "VYp84xAy2M6mWWqUm/kokN9QjAiT/DZRxZK8VhY7O9+oATo7/YPCMd9Em417O13k\n"
+ "+F0o/8IMaQvpmtlAsLc2ZKwGqqG+HD2dOwIBAg==\n"
+ "-----END DH PARAMETERS-----";
+ BIO* bio = BIO_new_mem_buf(const_cast<char*>(dh_parameters_pem),
+ -1); // Use null terminator for length
+ DH* dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
+ BIO_free(bio);
+ return dh;
+}
+
String Ssl::generate_key() {
EVP_PKEY* pkey = NULL;
EVP_PKEY_CTX* pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL);
@@ -63,7 +108,7 @@ String Ssl::generate_key() {
return result;
}
-String Ssl::generate_cert(const String& key, String cn) {
+String Ssl::generate_cert(const String& key, String cn, String ca_cert, String ca_key) {
// Assign the proper default hostname
if (cn.empty()) {
#ifdef WIN32
@@ -85,6 +130,20 @@ String Ssl::generate_cert(const String& key, String cn) {
BIO_free(bio);
}
+ X509_REQ* x509_req = NULL;
+ if (!ca_cert.empty() && !ca_key.empty()) {
+ x509_req = X509_REQ_new();
+ X509_REQ_set_version(x509_req, 2);
+ X509_REQ_set_pubkey(x509_req, pkey);
+
+ X509_NAME* name = X509_REQ_get_subject_name(x509_req);
+ X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC,
+ reinterpret_cast<const unsigned char*>("US"), -1, -1, 0);
+ X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
+ reinterpret_cast<const unsigned char*>(cn.c_str()), -1, -1, 0);
+ X509_REQ_sign(x509_req, pkey, EVP_sha256());
+ }
+
X509* x509 = X509_new();
X509_set_version(x509, 2);
ASN1_INTEGER_set(X509_get_serialNumber(x509), 0);
@@ -92,13 +151,62 @@ String Ssl::generate_cert(const String& key, String cn) {
X509_gmtime_adj(X509_get_notAfter(x509), static_cast(60 * 60 * 24 * 365));
X509_set_pubkey(x509, pkey);
- X509_NAME* name = X509_get_subject_name(x509);
- X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC, reinterpret_cast<const unsigned char*>("US"),
- -1, -1, 0);
- X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
- reinterpret_cast<const unsigned char*>(cn.c_str()), -1, -1, 0);
- X509_set_issuer_name(x509, name);
- X509_sign(x509, pkey, EVP_md5());
+ if (x509_req) {
+ X509_set_subject_name(x509, X509_REQ_get_subject_name(x509_req));
+
+ X509* x509_ca = NULL;
+ { // Read CA from string
+ BIO* bio = BIO_new_mem_buf(const_cast<char*>(ca_cert.c_str()), ca_cert.length());
+ if (!PEM_read_bio_X509(bio, &x509_ca, NULL, NULL)) {
+ X509_free(x509);
+ X509_REQ_free(x509_req);
+ BIO_free(bio);
+ return "";
+ }
+ BIO_free(bio);
+ }
+ X509_set_issuer_name(x509, X509_get_issuer_name(x509_ca));
+
+ EVP_PKEY* pkey_ca = NULL;
+ { // Read key from string
+ BIO* bio = BIO_new_mem_buf(const_cast<char*>(ca_key.c_str()), ca_key.length());
+ if (!PEM_read_bio_PrivateKey(bio, &pkey_ca, NULL, NULL)) {
+ BIO_free(bio);
+ X509_free(x509);
+ X509_free(x509_ca);
+ X509_REQ_free(x509_req);
+ return "";
+ }
+ BIO_free(bio);
+ }
+ X509_sign(x509, pkey_ca, EVP_sha256());
+
+ X509_free(x509_ca);
+ EVP_PKEY_free(pkey_ca);
+ } else {
+ if (cn == "CA") { // Set the purpose as a CA certificate.
+ X509_EXTENSION* x509_ex;
+ X509V3_CTX x509v3_ctx;
+ X509V3_set_ctx_nodb(&x509v3_ctx);
+ X509V3_set_ctx(&x509v3_ctx, x509, x509, NULL, NULL, 0);
+ x509_ex = X509V3_EXT_conf_nid(NULL, &x509v3_ctx, NID_basic_constraints,
+ const_cast<char*>("critical,CA:TRUE"));
+ if (!x509_ex) {
+ X509_free(x509);
+ X509_EXTENSION_free(x509_ex);
+ return "";
+ }
+ X509_add_ext(x509, x509_ex, -1);
+ X509_EXTENSION_free(x509_ex);
+ }
+ X509_NAME* name = X509_get_subject_name(x509);
+ X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC,
+ reinterpret_cast<const unsigned char*>("US"), -1, -1, 0);
+ X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
+ reinterpret_cast<const unsigned char*>(cn.c_str()), -1, -1, 0);
+ X509_set_issuer_name(x509, name);
+ X509_sign(x509, pkey, EVP_sha256());
+ }
String result;
{ // Write cert into string
@@ -111,6 +219,8 @@ String Ssl::generate_cert(const String& key, String cn) {
}
X509_free(x509);
+ if (x509_req) X509_REQ_free(x509_req);
+
EVP_PKEY_free(pkey);
return result;
@@ -125,6 +235,18 @@ static void print_ssl_error() {
fprintf(stderr, "%s\n", buf);
}
+static X509* load_cert(const String& cert) {
+ X509* x509 = NULL;
+ BIO* bio = BIO_new_mem_buf(const_cast<char*>(cert.c_str()), cert.length());
+ if (PEM_read_bio_X509(bio, &x509, NULL, NULL) == NULL) {
+ print_ssl_error();
+ BIO_free(bio);
+ return NULL;
+ }
+ BIO_free(bio);
+ return x509;
+}
+
struct WriteReq {
WriteReq(const char* data, size_t len, ClientConnection* connection)
: data(data, len)
@@ -151,8 +273,7 @@ ClientConnection::ClientConnection(ServerConnection* server)
, server_(server)
, ssl_(server->ssl_context() ? SSL_new(server->ssl_context()) : NULL)
, incoming_bio_(ssl_ ? BIO_new(BIO_s_mem()) : NULL)
- , outgoing_bio_(ssl_ ? BIO_new(BIO_s_mem()) : NULL)
- , handshake_state_(SSL_HANDSHAKE_INPROGRESS) {
+ , outgoing_bio_(ssl_ ? BIO_new(BIO_s_mem()) : NULL) {
tcp_.init(server->loop());
if (ssl_) {
SSL_set_bio(ssl_, incoming_bio_, outgoing_bio_);
@@ -185,6 +306,13 @@ int ClientConnection::accept() {
return uv_read_start(tcp_.as_stream(), on_alloc, on_read);
}
+const char* ClientConnection::sni_server_name() const {
+ if (ssl_) {
+ return SSL_get_servername(ssl_, TLSEXT_NAMETYPE_host_name);
+ }
+ return NULL;
+}
+
void ClientConnection::on_close(uv_handle_t* handle) {
ClientConnection* connection = static_cast<ClientConnection*>(handle->data);
connection->handle_close();
@@ -234,21 +362,8 @@ void ClientConnection::handle_write(int status) {
close();
return;
}
- if (ssl_) {
- switch (handshake_state_) {
- case SSL_HANDSHAKE_INPROGRESS:
- // Nothing to do
- break;
- case SSL_HANDSHAKE_DONE:
- on_write();
- break;
- case SSL_HANDSHAKE_FINAL_WRITE:
- handshake_state_ = SSL_HANDSHAKE_DONE;
- break;
- }
- } else {
- on_write();
- }
+
+ on_write();
}
int ClientConnection::internal_write(const char* data, size_t len) {
@@ -329,12 +444,10 @@ void ClientConnection::on_ssl_read(const char* data, size_t len) {
internal_write(buf, num_bytes);
}
- if (is_handshake_done()) {
- handshake_state_ = data_written ? SSL_HANDSHAKE_FINAL_WRITE : SSL_HANDSHAKE_DONE;
+ if (is_handshake_done() && data_written) {
+ return; // Handshake is not completed; ignore remaining data
}
- }
-
- if (is_handshake_done()) {
+ } else {
char buf[SSL_BUF_SIZE];
while ((rc = SSL_read(ssl_, buf, sizeof(buf))) > 0) {
on_read(buf, rc);
@@ -369,41 +482,55 @@ uv_loop_t* ServerConnection::loop() {
return event_loop_->loop();
}
-bool ServerConnection::use_ssl(const String& key, const String& cert, const String& password) {
+bool ServerConnection::use_ssl(const String& key, const String& cert,
+ const String& ca_cert /*= ""*/,
+ bool require_client_cert /*= false*/) {
if (ssl_context_) {
SSL_CTX_free(ssl_context_);
}
- if ((ssl_context_ = SSL_CTX_new(SSLv23_server_method())) == NULL) {
+ if ((ssl_context_ = SSL_CTX_new(SSL_SERVER_METHOD())) == NULL) {
print_ssl_error();
return false;
}
- SSL_CTX_set_default_passwd_cb_userdata(ssl_context_, (void*)password.c_str());
+ SSL_CTX_set_default_passwd_cb_userdata(ssl_context_, (void*)"");
SSL_CTX_set_default_passwd_cb(ssl_context_, on_password);
+ SSL_CTX_set_verify(ssl_context_, SSL_VERIFY_NONE, NULL);
- X509* x509 = NULL;
- { // Read cert from string
- BIO* bio = BIO_new_mem_buf(const_cast<char*>(cert.c_str()), cert.length());
- if (PEM_read_bio_X509(bio, &x509, NULL, NULL) == NULL) {
+ { // Load server certificate
+ X509* x509 = load_cert(cert);
+ if (!x509) return false;
+ if (SSL_CTX_use_certificate(ssl_context_, x509) <= 0) {
print_ssl_error();
- BIO_free(bio);
+ X509_free(x509);
return false;
}
- BIO_free(bio);
+ X509_free(x509);
}
- if (SSL_CTX_use_certificate(ssl_context_, x509) <= 0) {
- print_ssl_error();
- X509_free(x509);
- return false;
+ if (!ca_cert.empty()) { // Load CA certificate
+ X509* x509 = load_cert(ca_cert);
+ if (!x509) return false;
+ if (SSL_CTX_add_extra_chain_cert(ssl_context_, x509) <= 0) { // Certificate freed by function
+ print_ssl_error();
+ X509_free(x509);
+ return false;
+ }
+ if (require_client_cert) {
+ X509_STORE* cert_store = SSL_CTX_get_cert_store(ssl_context_);
+ if (X509_STORE_add_cert(cert_store, x509) <= 0) {
+ print_ssl_error();
+ return false;
+ }
+ SSL_CTX_set_verify(ssl_context_, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
+ }
}
- X509_free(x509);
EVP_PKEY* pkey = NULL;
{ // Read key from string
BIO* bio = BIO_new_mem_buf(const_cast<char*>(key.c_str()), key.length());
- if (PEM_read_bio_PrivateKey(bio, &pkey, on_password, (void*)password.c_str()) == NULL) {
+ if (PEM_read_bio_PrivateKey(bio, &pkey, on_password, (void*)"") == NULL) {
print_ssl_error();
BIO_free(bio);
return false;
@@ -418,11 +545,13 @@ bool ServerConnection::use_ssl(const String& key, const String& cert, const Stri
}
EVP_PKEY_free(pkey);
- RSA* rsa = RSA_generate_key(512, RSA_F4, NULL, NULL);
- SSL_CTX_set_tmp_rsa(ssl_context_, rsa);
- RSA_free(rsa);
-
- SSL_CTX_set_verify(ssl_context_, SSL_VERIFY_NONE, 0);
+ DH* dh = dh_parameters();
+ if (!dh || !SSL_CTX_set_tmp_dh(ssl_context_, dh)) {
+ print_ssl_error();
+ DH_free(dh);
+ return false;
+ }
+ DH_free(dh);
return true;
}
@@ -513,7 +642,8 @@ void ServerConnection::internal_listen() {
inc_ref(); // For the TCP handle
- rc = tcp_.bind(address_.addr());
+ Address::SocketStorage storage;
+ rc = tcp_.bind(address_.to_sockaddr(&storage));
if (rc != 0) {
fprintf(stderr, "Unable to bind address %s\n", address_.to_string(true).c_str());
uv_close(tcp_.as_handle(), on_close);
@@ -777,7 +907,7 @@ const char* decode_query_params_v1(const char* input, const char* end, bool is_e
}
const char* decode_query_params_v2(const char* input, const char* end, QueryParameters* params) {
- int8_t flags;
+ int8_t flags = 0;
const char* pos = input;
pos = decode_uint16(pos, end, ¶ms->consistency);
pos = decode_int8(pos, end, &flags);
@@ -798,7 +928,7 @@ const char* decode_query_params_v2(const char* input, const char* end, QueryPara
}
const char* decode_query_params_v3v4(const char* input, const char* end, QueryParameters* params) {
- int8_t flags;
+ int8_t flags = 0;
const char* pos = input;
pos = decode_uint16(pos, end, ¶ms->consistency);
pos = decode_int8(pos, end, &flags);
@@ -824,7 +954,7 @@ const char* decode_query_params_v3v4(const char* input, const char* end, QueryPa
}
const char* decode_query_params_v5(const char* input, const char* end, QueryParameters* params) {
- int32_t flags;
+ int32_t flags = 0;
const char* pos = input;
pos = decode_uint16(pos, end, ¶ms->consistency);
pos = decode_int32(pos, end, &flags);
@@ -967,6 +1097,15 @@ inline int32_t encode_uuid(CassUuid uuid, String* output) {
return 16;
}
+int32_t encode_string_map(const Map<String, Vector<String> >& value, String* output) {
+ int32_t size = encode_uint16(value.size(), output);
+ for (Map<String, Vector<String> >::const_iterator it = value.begin(); it != value.end(); ++it) {
+ size += encode_string(it->first, output);
+ size += encode_string_list(it->second, output);
+ }
+ return size;
+}
+
static String encode_header(int8_t version, int8_t flags, int16_t stream, int8_t opcode,
int32_t len) {
String header;
@@ -1553,8 +1692,8 @@ void SystemPeers::on_run(Request* request) const {
}
String ip = query.substr(pos, end_pos - pos);
- Address address;
- if (!Address::from_string(ip, request->address().port(), &address)) {
+ Address address(ip, request->address().port());
+ if (!address.is_valid_and_resolved()) {
request->error(ERROR_INVALID_QUERY, "Invalid inet address in WHERE clause");
return;
}
@@ -1622,8 +1761,8 @@ void SystemPeersDse::on_run(Request* request) const {
}
String ip = query.substr(pos, end_pos - pos);
- Address address;
- if (!Address::from_string(ip, request->address().port(), &address)) {
+ Address address(ip, request->address().port());
+ if (!address.is_valid_and_resolved()) {
request->error(ERROR_INVALID_QUERY, "Invalid inet address in WHERE clause");
return;
}
@@ -1872,7 +2011,17 @@ int32_t ProtocolHandler::decode_frame(ClientConnection* client, const char* fram
} else {
return len - remaining;
}
- state_ = BODY;
+
+ if (length_ == 0) {
+ decode_body(client, pos, 0);
+ version_ = 0;
+ flags_ = 0;
+ opcode_ = 0;
+ length_ = 0;
+ state_ = PROTOCOL_VERSION;
+ } else {
+ state_ = BODY;
+ }
break;
case BODY:
if (remaining >= length_) {
@@ -2234,9 +2383,10 @@ Host::Host(const Address& address, const String& dc, const String& rack, MT19937
}
}
-SimpleEventLoopGroup::SimpleEventLoopGroup(size_t num_threads)
+SimpleEventLoopGroup::SimpleEventLoopGroup(size_t num_threads,
+ const String& thread_name /*= "mockssandra"*/)
: RoundRobinEventLoopGroup(num_threads) {
- int rc = init("mockssandra");
+ int rc = init(thread_name);
UNUSED_(rc);
assert(rc == 0 && "Unable to initialize simple event loop");
run();
diff --git a/gtests/src/unit/mockssandra.hpp b/gtests/src/unit/mockssandra.hpp
index 2e4d27e80..eb011ce4d 100644
--- a/gtests/src/unit/mockssandra.hpp
+++ b/gtests/src/unit/mockssandra.hpp
@@ -28,6 +28,7 @@
#include "address.hpp"
#include "event_loop.hpp"
#include "list.hpp"
+#include "map.hpp"
#include "ref_counted.hpp"
#include "scoped_ptr.hpp"
#include "string.hpp"
@@ -46,6 +47,7 @@
using datastax::String;
using datastax::internal::Atomic;
using datastax::internal::List;
+using datastax::internal::Map;
using datastax::internal::RefCounted;
using datastax::internal::ScopedPtr;
using datastax::internal::SharedRefPtr;
@@ -62,7 +64,8 @@ namespace mockssandra {
class Ssl {
public:
static String generate_key();
- static String generate_cert(const String& key, String cn = "");
+ static String generate_cert(const String& key, String cn = "", String ca_cert = "",
+ String ca_key = "");
};
namespace internal {
@@ -103,6 +106,8 @@ class ClientConnection {
protected:
int accept();
+ const char* sni_server_name() const;
+
private:
static void on_close(uv_handle_t* handle);
void handle_close();
@@ -144,6 +149,7 @@ class ClientConnection {
class ClientConnectionFactory {
public:
virtual ClientConnection* create(ServerConnection* server) const = 0;
+ virtual ~ClientConnectionFactory() {}
};
class ServerConnectionTask : public RefCounted {
@@ -168,7 +174,8 @@ class ServerConnection : public RefCounted {
SSL_CTX* ssl_context() { return ssl_context_; }
const ClientConnections& clients() const { return clients_; }
- bool use_ssl(const String& key, const String& cert, const String& password = "");
+ bool use_ssl(const String& key, const String& cert, const String& ca_cert = "",
+ bool require_client_cert = false);
void listen(EventLoopGroup* event_loop_group);
int wait_listen();
@@ -355,6 +362,8 @@ struct QueryParameters {
String keyspace;
};
+int32_t encode_string_map(const Map<String, Vector<String> >& value, String* output);
+
class Type {
public:
static Type text();
@@ -1204,7 +1213,7 @@ class Cluster {
class SimpleEventLoopGroup : public RoundRobinEventLoopGroup {
public:
- SimpleEventLoopGroup(size_t num_threads = 1);
+ SimpleEventLoopGroup(size_t num_threads = 1, const String& thread_name = "mockssandra");
~SimpleEventLoopGroup();
};
@@ -1246,53 +1255,39 @@ class SimpleCluster : public Cluster {
class SimpleEchoServer {
public:
- SimpleEchoServer(const Address& address = Address("127.0.0.1", 8888))
- : event_loop_group_(1)
- , server_(new internal::ServerConnection(address, factory_)) {}
+ SimpleEchoServer()
+ : factory_(new EchoClientConnectionFactory())
+ , event_loop_group_(1) {}
~SimpleEchoServer() { close(); }
void close() {
- server_->close();
- server_->wait_close();
+ if (server_) {
+ server_->close();
+ server_->wait_close();
+ }
}
String use_ssl(const String& cn = "") {
- String key(Ssl::generate_key());
- String cert(Ssl::generate_cert(key, cn));
- if (!server_->use_ssl(key, cert)) {
- return "";
- }
- return cert;
+ ssl_key_ = Ssl::generate_key();
+ ssl_cert_ = Ssl::generate_cert(ssl_key_, cn);
+ return ssl_cert_;
}
- void use_close_immediately() { factory_.use_close_immediately(); }
+ void use_connection_factory(internal::ClientConnectionFactory* factory) {
+ factory_.reset(factory);
+ }
- int listen() {
+ int listen(const Address& address = Address("127.0.0.1", 8888)) {
+ server_.reset(new internal::ServerConnection(address, *factory_));
+ if (!ssl_key_.empty() && !ssl_cert_.empty() && !server_->use_ssl(ssl_key_, ssl_cert_)) {
+ return -1;
+ }
server_->listen(&event_loop_group_);
return server_->wait_listen();
}
- void reset(const Address& address) {
- server_.reset(new internal::ServerConnection(address, factory_));
- }
-
private:
- class CloseConnection : public internal::ClientConnection {
- public:
- CloseConnection(internal::ServerConnection* server)
- : internal::ClientConnection(server) {}
-
- virtual int on_accept() {
- int rc = accept();
- if (rc != 0) {
- return rc;
- }
- close();
- return rc;
- }
- };
-
class EchoConnection : public internal::ClientConnection {
public:
EchoConnection(internal::ServerConnection* server)
@@ -1301,29 +1296,19 @@ class SimpleEchoServer {
virtual void on_read(const char* data, size_t len) { write(data, len); }
};
- class ClientConnectionFactory : public internal::ClientConnectionFactory {
+ class EchoClientConnectionFactory : public internal::ClientConnectionFactory {
public:
- ClientConnectionFactory()
- : close_immediately_(false) {}
-
- void use_close_immediately() { close_immediately_ = true; }
-
virtual internal::ClientConnection* create(internal::ServerConnection* server) const {
- if (close_immediately_) {
- return new CloseConnection(server);
- } else {
- return new EchoConnection(server);
- }
+ return new EchoConnection(server);
}
-
- private:
- bool close_immediately_;
};
private:
- ClientConnectionFactory factory_;
+ ScopedPtr<internal::ClientConnectionFactory> factory_;
SimpleEventLoopGroup event_loop_group_;
internal::ServerConnection::Ptr server_;
+ String ssl_key_;
+ String ssl_cert_;
};
} // namespace mockssandra
diff --git a/gtests/src/unit/tests/test_address.cpp b/gtests/src/unit/tests/test_address.cpp
index c318445c5..cae21eb25 100644
--- a/gtests/src/unit/tests/test_address.cpp
+++ b/gtests/src/unit/tests/test_address.cpp
@@ -19,15 +19,181 @@
#include "address.hpp"
using datastax::internal::core::Address;
+using datastax::internal::core::AddressSet;
+
+TEST(AddressUnitTest, FromString) {
+ EXPECT_TRUE(Address("127.0.0.1", 9042).is_resolved());
+ EXPECT_TRUE(Address("0.0.0.0", 9042).is_resolved());
+ EXPECT_TRUE(Address("::", 9042).is_resolved());
+ EXPECT_TRUE(Address("::1", 9042).is_resolved());
+ EXPECT_TRUE(Address("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 9042).is_resolved());
+
+ EXPECT_FALSE(Address().is_resolved());
+ EXPECT_FALSE(Address("localhost", 9042).is_resolved());
+ EXPECT_FALSE(Address("datastax.com", 9042).is_resolved());
+}
TEST(AddressUnitTest, CompareIPv4) {
- EXPECT_GT(Address("255.255.255.255", 9042).compare(Address("0.0.0.0", 9042)), 0);
- EXPECT_LT(Address("0.0.0.0", 9042).compare(Address("255.255.255.255", 9042)), 0);
- EXPECT_EQ(Address("1.2.3.4", 9042).compare(Address("1.2.3.4", 9042)), 0);
+ EXPECT_LT(Address("0.0.0.0", 9042), Address("255.255.255.255", 9042));
+ EXPECT_EQ(Address("1.2.3.4", 9042), Address("1.2.3.4", 9042));
+ EXPECT_NE(Address("1.2.3.4", 9042), Address("5.6.7.8", 9042));
+
+ EXPECT_LT(Address("0.0.0.0", 9041), Address("0.0.0.0", 9042));
+ EXPECT_NE(Address("0.0.0.0", 9041), Address("0.0.0.0", 9042));
+
+ // Without comparing port
+ EXPECT_TRUE(Address("0.0.0.0", 9041).equals(Address("0.0.0.0", 9042), false));
+ EXPECT_FALSE(Address("127.0.0.1", 9042).equals(Address("0.0.0.0", 9042), false));
}
TEST(AddressUnitTest, CompareIPv6) {
- EXPECT_GT(Address("0.0.0.0", 1).compare(Address("0.0.0.0", 0), true), 0);
- EXPECT_LT(Address("0.0.0.0", 0).compare(Address("0.0.0.0", 1), true), 0);
- EXPECT_EQ(Address("0.0.0.0", 0).compare(Address("0.0.0.0", 1), false), 0);
+ EXPECT_LT(Address("0:0:0:0:0:0:0:0", 9042), Address("0:0:0:0:0:0:0:FFFF", 9042));
+ EXPECT_EQ(Address("0:0:0:0:0:0:0:1234", 9042), Address("0:0:0:0:0:0:0:1234", 9042));
+ EXPECT_NE(Address("0:0:0:0:0:0:0:1234", 9042), Address("0:0:0:0:0:0:0:5678", 9042));
+
+ EXPECT_LT(Address("0:0:0:0:0:0:0:0", 9041), Address("0:0:0:0:0:0:0:0", 9042));
+ EXPECT_NE(Address("0:0:0:0:0:0:0:0", 9041), Address("0:0:0:0:0:0:0:0", 9042));
+
+ // Without comparing port
+ EXPECT_TRUE(Address("::", 9041).equals(Address("::", 9042), false));
+ EXPECT_FALSE(Address("::1", 9042).equals(Address("::", 9042), false));
+
+ EXPECT_EQ(Address("0:0:0:0:0:0:0:0", 9042), Address("::", 9042)); // Normalization
+}
+
+TEST(AddressUnitTest, ToSockAddrIPv4) {
+ Address expected("127.0.0.1", 9042);
+ Address::SocketStorage storage;
+ Address actual(expected.to_sockaddr(&storage));
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(AddressUnitTest, ToSockAddrIPv6) {
+ Address expected("::1", 9042);
+ Address::SocketStorage storage;
+ Address actual(expected.to_sockaddr(&storage));
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(AddressUnitTest, ToInetIPv4) {
+ Address expected("127.0.0.1", 9042);
+
+ uint8_t inet_address[4];
+ uint8_t inet_address_length = expected.to_inet(inet_address);
+ EXPECT_EQ(inet_address_length, 4u);
+
+ Address actual(inet_address, inet_address_length, 9042);
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(AddressUnitTest, ToInetIPv6) {
+ Address expected("::1", 9042);
+
+ uint8_t inet_address[16];
+ uint8_t inet_address_length = expected.to_inet(inet_address);
+ EXPECT_EQ(inet_address_length, 16u);
+
+ Address actual(inet_address, inet_address_length, 9042);
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(AddressUnitTest, ToString) {
+ // Only hostname/address
+ EXPECT_EQ(Address("127.0.0.1", 9042).hostname_or_address(), "127.0.0.1");
+ EXPECT_EQ(Address("::1", 9042).hostname_or_address(), "::1");
+ EXPECT_EQ(Address("0:0:0:0:0:0:0:1", 9042).hostname_or_address(), "::1"); // IPv6 normalization
+ EXPECT_EQ(Address("0:0:0:0:0:0:0:0", 9042).hostname_or_address(), "::"); // IPv6 normalization
+ EXPECT_EQ(Address("datastax.com", 9042).hostname_or_address(), "datastax.com");
+
+ // w/o port
+ EXPECT_EQ(Address("127.0.0.1", 9042).to_string(), "127.0.0.1");
+ EXPECT_EQ(Address("::1", 9042).to_string(), "::1");
+ EXPECT_EQ(Address("datastax.com", 9042).to_string(), "datastax.com");
+
+ // w/ port
+ EXPECT_EQ(Address("127.0.0.1", 9042).to_string(true), "127.0.0.1:9042");
+ EXPECT_EQ(Address("::1", 9042).to_string(true), "[::1]:9042");
+ EXPECT_EQ(Address("datastax.com", 9042).to_string(true), "datastax.com:9042");
+
+ // w/ servername
+ EXPECT_EQ(Address("127.0.0.1", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(),
+ "127.0.0.1 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)");
+ EXPECT_EQ(Address("::1", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(),
+ "::1 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)");
+ EXPECT_EQ(Address("datastax.com", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(),
+ "datastax.com (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)");
+
+ // w/ servername and port
+ EXPECT_EQ(Address("127.0.0.1", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(true),
+ "127.0.0.1:9042 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)");
+ EXPECT_EQ(Address("::1", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(true),
+ "[::1]:9042 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)");
+ EXPECT_EQ(Address("datastax.com", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(true),
+ "datastax.com:9042 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)");
+}
+
+TEST(AddressUnitTest, Hash) {
+ AddressSet set;
+
+ EXPECT_EQ(set.size(), 0u); // Empty
+
+ set.insert(Address("0.0.0.0", 9042));
+ EXPECT_EQ(set.size(), 1u); // Added
+
+ // Reinsert
+ set.insert(Address("0.0.0.0", 9042));
+ EXPECT_EQ(set.size(), 1u); // No change
+
+ // Remove
+ set.erase(Address("0.0.0.0", 9042));
+ EXPECT_EQ(set.size(), 0u); // Removed
+
+ // Multiple
+ set.insert(Address("0.0.0.0", 9042));
+ set.insert(Address("127.0.0.1", 9042));
+ set.insert(Address("localhost", 9042));
+ set.insert(Address("::1", 9042));
+ EXPECT_EQ(set.size(), 4u); // Added
+ EXPECT_EQ(set.count(Address("0.0.0.0", 9042)), 1u);
+ EXPECT_EQ(set.count(Address("127.0.0.1", 9042)), 1u);
+ EXPECT_EQ(set.count(Address("localhost", 9042)), 1u);
+ EXPECT_EQ(set.count(Address("::1", 9042)), 1u);
+
+ // Different port
+ set.insert(Address("0.0.0.0", 9041));
+ EXPECT_EQ(set.size(), 5u); // Added
+}
+
+TEST(AddressUnitTest, StrictWeakOrder) {
+ { // Family
+ Address a("localhost", 9042);
+ Address b("127.0.0.1", 30002, "a");
+ ASSERT_NE(a, b);
+ ASSERT_TRUE(a < b);
+ ASSERT_FALSE(b < a);
+ }
+
+ { // Port
+ Address a("localhost", 9042, "b");
+ Address b("localhost", 30002, "a");
+ ASSERT_NE(a, b);
+ ASSERT_TRUE(a < b);
+ ASSERT_FALSE(b < a);
+ }
+
+ { // Server name
+ Address a("127.0.0.2", 9042, "a");
+ Address b("127.0.0.1", 9042, "b");
+ ASSERT_NE(a, b);
+ ASSERT_TRUE(a < b);
+ ASSERT_FALSE(b < a);
+ }
+
+ { // Hostname or address
+ Address a("127.0.0.1", 9042, "a");
+ Address b("127.0.0.2", 9042, "a");
+ ASSERT_NE(a, b);
+ ASSERT_TRUE(a < b);
+ ASSERT_FALSE(b < a);
+ }
}
diff --git a/gtests/src/unit/tests/test_cloud_secure_connect_config.cpp b/gtests/src/unit/tests/test_cloud_secure_connect_config.cpp
new file mode 100644
index 000000000..60bff6733
--- /dev/null
+++ b/gtests/src/unit/tests/test_cloud_secure_connect_config.cpp
@@ -0,0 +1,687 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "driver_config.hpp"
+
+#ifdef HAVE_ZLIB
+#include "http_test.hpp"
+
+#include "cloud_secure_connection_config.hpp"
+#include "cluster_config.hpp"
+#include "cluster_connector.hpp"
+#include "cluster_metadata_resolver.hpp"
+#include "config.hpp"
+#include "http_client.hpp"
+#include "json.hpp"
+#include "string.hpp"
+
+#include "zip.h"
+
+#include <time.h>
+#include <uv.h>
+
+#define CONFIGURATION_FILE "config.json"
+#define CERTIFICATE_AUTHORITY_FILE "ca.crt"
+#define CERTIFICATE_FILE "cert"
+#define KEY_FILE "key"
+
+#define CREDS_V1_ZIP_FILE "creds-v1.zip"
+
+#ifdef _WIN32
+#define PATH_SEPARATOR '\\'
+#else
+#define PATH_SEPARATOR '/'
+#endif
+
+#define SNI_LOCAL_DC "dc1"
+#define SNI_HOST HTTP_MOCK_HOSTNAME
+#define SNI_PORT 30002
+#define SNI_HOST_AND_PORT HTTP_MOCK_HOSTNAME ":30002"
+#define SNI_HOST_ID_1 "276b1694-64c4-4ba8-afb4-e33915a02f1e"
+#define SNI_HOST_ID_2 "8c29f723-5c1c-4ffd-a4ef-8c683a7fc02b"
+#define SNI_HOST_ID_3 "fb91d3ff-47cb-447d-b31d-c5721ca8d7ab"
+#define METADATA_SERVICE_PORT 30443
+
+using datastax::String;
+using datastax::internal::core::AddressVec;
+using datastax::internal::core::CloudSecureConnectionConfig;
+using datastax::internal::core::ClusterConfig;
+using datastax::internal::core::ClusterMetadataResolver;
+using datastax::internal::core::ClusterSettings;
+using datastax::internal::core::Config;
+using datastax::internal::core::HttpClient;
+using datastax::internal::core::SslContext;
+using datastax::internal::core::SslContextFactory;
+using datastax::internal::enterprise::DsePlainTextAuthProvider;
+using datastax::internal::json::StringBuffer;
+using datastax::internal::json::Writer;
+
+using mockssandra::Ssl;
+
+class CloudSecureConnectionConfigTest : public HttpTest {
+public:
+ const String& ca_cert() const { return ca_cert_; }
+ void set_invalid_ca_cert() { ca_cert_ = "!!!!!INVALID!!!!!"; }
+ const String& ca_key() const { return ca_key_; }
+ const String& cert() const { return cert_; }
+ void set_invalid_cert() { cert_ = "!!!!!INVALID!!!!!"; }
+ const String& key() const { return key_; }
+ void set_invalid_key() { key_ = "!!!!!INVALID!!!!!"; }
+
+ void SetUp() {
+ HttpTest::SetUp();
+
+ char tmp[260] = { 0 }; // Note: 260 is the maximum path on Windows
+ size_t tmp_length = 260;
+ uv_os_tmpdir(tmp, &tmp_length);
+
+ tmp_zip_file_ = String(tmp, tmp_length) + PATH_SEPARATOR + CREDS_V1_ZIP_FILE;
+
+ ca_key_ = Ssl::generate_key();
+ ca_cert_ = Ssl::generate_cert(ca_key_, "CA");
+ key_ = Ssl::generate_key();
+ cert_ = Ssl::generate_cert(key_, "", ca_cert_, ca_key_);
+ }
+
+ const String& creds_zip_file() const { return tmp_zip_file_; }
+
+ void create_zip_file(const String& config, bool is_configuration = true, bool is_ca = true,
+ bool is_cert = true, bool is_key = true) {
+ zipFile zip_file = zipOpen64(tmp_zip_file_.c_str(), 0);
+
+ if (is_configuration && add_zip_file_entry(zip_file, CONFIGURATION_FILE)) {
+ zipWriteInFileInZip(zip_file, config.c_str(), config.length());
+ zipCloseFileInZip(zip_file);
+ }
+ if (is_ca && add_zip_file_entry(zip_file, CERTIFICATE_AUTHORITY_FILE)) {
+ zipWriteInFileInZip(zip_file, ca_cert_.c_str(), ca_cert_.length());
+ zipCloseFileInZip(zip_file);
+ }
+ if (is_cert && add_zip_file_entry(zip_file, CERTIFICATE_FILE)) {
+ zipWriteInFileInZip(zip_file, cert_.c_str(), cert_.length());
+ zipCloseFileInZip(zip_file);
+ }
+ if (is_key && add_zip_file_entry(zip_file, KEY_FILE)) {
+ zipWriteInFileInZip(zip_file, key_.c_str(), key_.length());
+ zipCloseFileInZip(zip_file);
+ }
+
+ zipClose(zip_file, NULL);
+ }
+
+ static void full_config_credsv1(StringBuffer& buffer, String host = "cloud.datastax.com",
+ int port = 1443) {
+ Writer writer(buffer);
+ writer.StartObject();
+ writer.Key("username");
+ writer.String("DataStax");
+ writer.Key("password");
+ writer.String("Constellation");
+ writer.Key("host");
+ writer.String(host.c_str());
+ writer.Key("port");
+ writer.Int(port);
+ writer.EndObject();
+ }
+
+private:
+ bool add_zip_file_entry(zipFile zip_file, const String& zip_filename) {
+ zip_fileinfo file_info;
+ memset(&file_info, 0, sizeof(file_info));
+ time_t tmp;
+ time(&tmp);
+ struct tm* time_info = localtime(&tmp);
+ file_info.tmz_date.tm_sec = time_info->tm_sec;
+ file_info.tmz_date.tm_min = time_info->tm_min;
+ file_info.tmz_date.tm_hour = time_info->tm_hour;
+ file_info.tmz_date.tm_mday = time_info->tm_mday;
+ file_info.tmz_date.tm_mon = time_info->tm_mon;
+ file_info.tmz_date.tm_year = time_info->tm_year;
+
+ int rc = zipOpenNewFileInZip(zip_file, zip_filename.c_str(), &file_info, NULL, 0, NULL, 0, NULL,
+ Z_DEFLATED, Z_DEFAULT_COMPRESSION);
+ return rc == ZIP_OK;
+ }
+
+private:
+ String tmp_zip_file_;
+ String ca_cert_;
+ String ca_key_;
+ String cert_;
+ String key_;
+};
+
+TEST_F(CloudSecureConnectionConfigTest, CredsV1) {
+ Config config;
+ CloudSecureConnectionConfig cloud_config;
+
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ create_zip_file(buffer.GetString());
+
+ EXPECT_TRUE(cloud_config.load(creds_zip_file(), &config));
+ EXPECT_EQ("DataStax", cloud_config.username());
+ EXPECT_EQ("Constellation", cloud_config.password());
+ EXPECT_EQ("cloud.datastax.com", cloud_config.host());
+ EXPECT_EQ(1443, cloud_config.port());
+ EXPECT_EQ(ca_cert(), cloud_config.ca_cert());
+ EXPECT_EQ(cert(), cloud_config.cert());
+ EXPECT_EQ(key(), cloud_config.key());
+
+ EXPECT_TRUE(config.ssl_context());
+ EXPECT_TRUE(dynamic_cast<DsePlainTextAuthProvider*>(config.auth_provider().get()) != NULL);
+}
+
+TEST_F(CloudSecureConnectionConfigTest, CredsV1WithoutCreds) {
+ Config config;
+ CloudSecureConnectionConfig cloud_config;
+
+ StringBuffer buffer;
+ Writer<StringBuffer> writer(buffer);
+ writer.StartObject();
+ writer.Key("host");
+ writer.String("bigdata.datastax.com");
+ writer.Key("port");
+ writer.Int(2443);
+ writer.EndObject();
+ create_zip_file(buffer.GetString());
+
+ EXPECT_TRUE(cloud_config.load(creds_zip_file(), &config));
+ EXPECT_EQ("", cloud_config.username());
+ EXPECT_EQ("", cloud_config.password());
+ EXPECT_EQ("bigdata.datastax.com", cloud_config.host());
+ EXPECT_EQ(2443, cloud_config.port());
+ EXPECT_EQ(ca_cert(), cloud_config.ca_cert());
+ EXPECT_EQ(cert(), cloud_config.cert());
+ EXPECT_EQ(key(), cloud_config.key());
+
+ EXPECT_TRUE(config.ssl_context());
+ EXPECT_TRUE(dynamic_cast<DsePlainTextAuthProvider*>(config.auth_provider().get()) ==
+ NULL); // Not configured
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1ConfigMissingHost) {
+ CloudSecureConnectionConfig config;
+
+ StringBuffer buffer;
+ Writer<StringBuffer> writer(buffer);
+ writer.StartObject();
+ writer.Key("username");
+ writer.String("DataStax");
+ writer.Key("password");
+ writer.String("Constellation");
+ writer.Key("port");
+ writer.Int(1443);
+ writer.EndObject();
+ create_zip_file(buffer.GetString());
+
+ EXPECT_FALSE(config.load(creds_zip_file()));
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1ConfigMissingPort) {
+ CloudSecureConnectionConfig config;
+
+ StringBuffer buffer;
+ Writer<StringBuffer> writer(buffer);
+ writer.StartObject();
+ writer.Key("username");
+ writer.String("DataStax");
+ writer.Key("password");
+ writer.String("Constellation");
+ writer.Key("host");
+ writer.String("cloud.datastax.com");
+ writer.EndObject();
+ create_zip_file(buffer.GetString());
+
+ EXPECT_FALSE(config.load(creds_zip_file()));
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsMissingZipFile) {
+ CloudSecureConnectionConfig config;
+
+ EXPECT_FALSE(config.load("invalid.zip"));
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1MissingConfigJson) {
+ CloudSecureConnectionConfig config;
+
+ create_zip_file("", false);
+ EXPECT_FALSE(config.load(creds_zip_file()));
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1MissingCA) {
+ CloudSecureConnectionConfig config;
+
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ create_zip_file(buffer.GetString(), true, false);
+ EXPECT_FALSE(config.load(creds_zip_file()));
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1MissingCert) {
+ CloudSecureConnectionConfig config;
+
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ create_zip_file(buffer.GetString(), true, true, false);
+ EXPECT_FALSE(config.load(creds_zip_file()));
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1MissingKey) {
+ CloudSecureConnectionConfig config;
+
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ create_zip_file(buffer.GetString(), true, true, false);
+ create_zip_file(buffer.GetString(), true, true, true, false);
+ EXPECT_FALSE(config.load(creds_zip_file()));
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1SslCaCert) {
+ Config config;
+ CloudSecureConnectionConfig cloud_config;
+
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ set_invalid_ca_cert();
+ create_zip_file(buffer.GetString());
+
+ EXPECT_FALSE(cloud_config.load(creds_zip_file(), &config));
+ EXPECT_FALSE(config.ssl_context());
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1SslCert) {
+ Config config;
+ CloudSecureConnectionConfig cloud_config;
+
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ set_invalid_cert();
+ create_zip_file(buffer.GetString());
+
+ EXPECT_FALSE(cloud_config.load(creds_zip_file(), &config));
+ EXPECT_FALSE(config.ssl_context());
+}
+
+TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1SslKey) {
+ Config config;
+ CloudSecureConnectionConfig cloud_config;
+
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ set_invalid_key();
+ create_zip_file(buffer.GetString());
+
+ EXPECT_FALSE(cloud_config.load(creds_zip_file(), &config));
+ EXPECT_FALSE(config.ssl_context());
+}
+
+class CloudMetadataServerTest : public CloudSecureConnectionConfigTest {
+public:
+ void SetUp() {
+ CloudSecureConnectionConfigTest::SetUp();
+
+ StringBuffer buffer;
+ full_config_credsv1(buffer, HTTP_MOCK_HOSTNAME, HTTP_MOCK_SERVER_PORT);
+ create_zip_file(buffer.GetString());
+ cloud_config_.load(creds_zip_file(), &config_);
+
+ use_ssl(ca_cert(), ca_key(), HTTP_MOCK_HOSTNAME); // Ensure HttpServer is configured to use SSL
+
+ ClusterSettings settings(config_);
+ resolver_ = config_.cluster_metadata_resolver_factory()->new_instance(settings);
+ }
+
+ void start_http_server(bool is_content_type = true, bool is_contact_info = true,
+ bool is_local_dc = true, bool is_contact_points = true,
+ bool is_sni_proxy_address = true, bool is_port = true) {
+ set_path("/metadata");
+
+ StringBuffer buffer;
+ response_v1(buffer, is_contact_info, is_local_dc, is_contact_points, is_sni_proxy_address,
+ is_port);
+ set_response_body(buffer.GetString());
+
+ set_content_type(is_content_type ? response_v1_content_type() : "invalid");
+
+ HttpTest::start_http_server();
+ }
+
+ const ClusterMetadataResolver::Ptr& resolver() const { return resolver_; }
+
+ static void on_resolve_success(ClusterMetadataResolver* resolver, bool* flag) {
+ *flag = true;
+ EXPECT_EQ("dc1", resolver->local_dc());
+
+ const AddressVec& contact_points = resolver->resolved_contact_points();
+ ASSERT_EQ(3u, contact_points.size());
+ EXPECT_EQ(Address(SNI_HOST, SNI_PORT, SNI_HOST_ID_1), contact_points[0]);
+ EXPECT_EQ(Address(SNI_HOST, SNI_PORT, SNI_HOST_ID_2), contact_points[1]);
+ EXPECT_EQ(Address(SNI_HOST, SNI_PORT, SNI_HOST_ID_3), contact_points[2]);
+ }
+
+ static void on_resolve_success_default_port(ClusterMetadataResolver* resolver, bool* flag) {
+ *flag = true;
+ EXPECT_EQ("dc1", resolver->local_dc());
+
+ const AddressVec& contact_points = resolver->resolved_contact_points();
+ ASSERT_EQ(3u, contact_points.size());
+ EXPECT_EQ(Address(SNI_HOST, METADATA_SERVICE_PORT, SNI_HOST_ID_1), contact_points[0]);
+ EXPECT_EQ(Address(SNI_HOST, METADATA_SERVICE_PORT, SNI_HOST_ID_2), contact_points[1]);
+ EXPECT_EQ(Address(SNI_HOST, METADATA_SERVICE_PORT, SNI_HOST_ID_3), contact_points[2]);
+ }
+
+ static void on_resolve_failed(ClusterMetadataResolver* resolver, bool* flag) {
+ *flag = true;
+ EXPECT_EQ(0u, resolver->resolved_contact_points().size());
+ }
+
+ static void on_resolve_local_dc_failed(ClusterMetadataResolver* resolver, bool* flag) {
+ *flag = true;
+ EXPECT_EQ("", resolver->local_dc());
+ EXPECT_EQ(0u, resolver->resolved_contact_points().size());
+ }
+
+private:
+ static void response_v1(StringBuffer& buffer, bool is_contact_info = true,
+ bool is_local_dc = true, bool is_contact_points = true,
+ bool is_sni_proxy_address = true, bool is_port = true) {
+ Writer<StringBuffer> writer(buffer);
+ writer.StartObject();
+ writer.Key("version");
+ writer.Int(1);
+ writer.Key("region");
+ writer.String("local");
+ if (is_contact_info) {
+ writer.Key("contact_info");
+ writer.StartObject();
+ writer.Key("type");
+ writer.String("sni_proxy");
+ if (is_local_dc) {
+ writer.Key("local_dc");
+ writer.String(SNI_LOCAL_DC);
+ }
+ if (is_contact_points) {
+ writer.Key("contact_points");
+ writer.StartArray();
+ writer.String(SNI_HOST_ID_1);
+ writer.String(SNI_HOST_ID_2);
+ writer.String(SNI_HOST_ID_3);
+ writer.EndArray();
+ }
+ if (is_sni_proxy_address) {
+ writer.Key("sni_proxy_address");
+ if (is_port) {
+ writer.String(SNI_HOST_AND_PORT);
+ } else {
+ writer.String(SNI_HOST);
+ }
+ }
+ writer.EndObject();
+ }
+ writer.EndObject();
+ }
+
+ static const char* response_v1_content_type() { return "application/json"; }
+
+private:
+ Config config_;
+ CloudSecureConnectionConfig cloud_config_;
+ ClusterMetadataResolver::Ptr resolver_;
+};
+
+TEST_F(CloudMetadataServerTest, ResolveV1StandardSsl) {
+ start_http_server();
+
+ bool is_resolved = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_success, &is_resolved));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolved);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, ResolveV1DefaultPortSsl) {
+ start_http_server(true, true, true, true, true, false);
+
+ bool is_resolved = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points,
+ bind_callback(on_resolve_success_default_port, &is_resolved));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolved);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, InvalidMetadataServer) {
+ bool is_resolved = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolved);
+}
+
+TEST_F(CloudMetadataServerTest, ResolveV1InvalidContentTypeSsl) {
+ start_http_server(false);
+
+ bool is_resolved = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolved);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, ResolveV1MissingContactInfoSsl) {
+ start_http_server(true, false);
+
+ bool is_resolved = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolved);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, ResolveV1MissingLocalDcSsl) {
+ start_http_server(true, true, false);
+
+ bool is_resolved = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points,
+ bind_callback(on_resolve_local_dc_failed, &is_resolved));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolved);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, ResolveV1MissingContactPointsSsl) {
+ start_http_server(true, true, true, false);
+
+ bool is_resolved = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolved);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, ResolveV1MissingSniProxyAddressSsl) {
+ start_http_server(true, true, true, true, false);
+
+ bool is_resolved = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolved);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, ResolveInvalidJsonResponse) {
+ add_logging_critera("Unable to configure driver from metadata server: Metadata JSON is invalid");
+
+ set_path("/metadata");
+ set_response_body("[]");
+ set_content_type("application/json");
+ HttpTest::start_http_server();
+
+ bool is_resolve_failed = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolve_failed));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolve_failed);
+ EXPECT_EQ(logging_criteria_count(), 1);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, ResolveErrorResponse) {
+ add_logging_critera("Unable to configure driver from metadata server: Returned error response "
+ "code 400: 'Invalid version'");
+
+ const char* response_body = "{"
+ "\"code\": 400,"
+ "\"message\": \"Invalid version\""
+ "}";
+
+ set_path("/metadata");
+ set_response_body(response_body);
+ set_response_status_code(400);
+ set_content_type("application/json");
+ HttpTest::start_http_server();
+
+ bool is_resolve_failed = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolve_failed));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolve_failed);
+ EXPECT_EQ(logging_criteria_count(), 1);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, ResolveInvalidJsonErrorResponse) {
+ add_logging_critera("Unable to configure driver from metadata server: Returned error response "
+ "code 400: '[]'");
+
+ set_path("/metadata");
+ set_response_body("[]");
+ set_response_status_code(400);
+ set_content_type("application/json");
+ HttpTest::start_http_server();
+
+ bool is_resolve_failed = false;
+ AddressVec contact_points;
+ resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolve_failed));
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_resolve_failed);
+ EXPECT_EQ(logging_criteria_count(), 1);
+
+ stop_http_server();
+}
+
+TEST_F(CloudMetadataServerTest, CloudConfiguredInvalidContactPointsOverride) {
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ create_zip_file(buffer.GetString());
+
+ ClusterConfig cluster_config;
+ CassCluster* cluster = CassCluster::to(&cluster_config);
+ EXPECT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster, creds_zip_file().c_str()));
+ add_logging_critera("Contact points cannot be overridden with cloud secure connection bundle");
+ EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS,
+ cass_cluster_set_contact_points(cluster, "some.contact.point"));
+ EXPECT_EQ(logging_criteria_count(), 1);
+}
+
+TEST_F(CloudMetadataServerTest, CloudConfiguredInvalidSslContextOverride) {
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ create_zip_file(buffer.GetString());
+
+ ClusterConfig cluster_config;
+ CassCluster* cluster = CassCluster::to(&cluster_config);
+ SslContext::Ptr ssl_context(SslContextFactory::create());
+ CassSsl* ssl = CassSsl::to(ssl_context.get());
+
+ EXPECT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster, creds_zip_file().c_str()));
+ add_logging_critera("SSL context cannot be overridden with cloud secure connection bundle");
+ cass_cluster_set_ssl(cluster, ssl);
+ EXPECT_EQ(logging_criteria_count(), 1);
+}
+
+TEST_F(CloudMetadataServerTest, CloudConfiguredFailureContactPointsExist) {
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ create_zip_file(buffer.GetString());
+
+ ClusterConfig cluster_config;
+ CassCluster* cluster = CassCluster::to(&cluster_config);
+ EXPECT_EQ(CASS_OK, cass_cluster_set_contact_points(cluster, "some.contact.point"));
+ add_logging_critera("Contact points must not be specified with cloud secure connection bundle");
+ EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS,
+ cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster, creds_zip_file().c_str()));
+ EXPECT_EQ(logging_criteria_count(), 1);
+}
+
+TEST_F(CloudMetadataServerTest, CloudConfiguredFailureSslContextExist) {
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ create_zip_file(buffer.GetString());
+
+ ClusterConfig cluster_config;
+ CassCluster* cluster = CassCluster::to(&cluster_config);
+ SslContext::Ptr ssl_context(SslContextFactory::create());
+ CassSsl* ssl = CassSsl::to(ssl_context.get());
+
+ cass_cluster_set_ssl(cluster, ssl);
+ add_logging_critera("SSL context must not be specified with cloud secure connection bundle");
+ EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS,
+ cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster, creds_zip_file().c_str()));
+ EXPECT_EQ(logging_criteria_count(), 1);
+}
+
+TEST_F(CloudMetadataServerTest, CloudConfiguredFailureContactPointsAndSslContextExist) {
+ StringBuffer buffer;
+ full_config_credsv1(buffer);
+ create_zip_file(buffer.GetString());
+
+ ClusterConfig cluster_config;
+ CassCluster* cluster = CassCluster::to(&cluster_config);
+ SslContext::Ptr ssl_context(SslContextFactory::create());
+ CassSsl* ssl = CassSsl::to(ssl_context.get());
+
+ EXPECT_EQ(CASS_OK, cass_cluster_set_contact_points(cluster, "some.contact.point"));
+ cass_cluster_set_ssl(cluster, ssl);
+ add_logging_critera(
+ "Contact points and SSL context must not be specified with cloud secure connection bundle");
+ EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS,
+ cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+ cluster, creds_zip_file().c_str()));
+ EXPECT_EQ(logging_criteria_count(), 1);
+}
+#endif
diff --git a/gtests/src/unit/tests/test_cluster.cpp b/gtests/src/unit/tests/test_cluster.cpp
index 8e9f91357..b00e31334 100644
--- a/gtests/src/unit/tests/test_cluster.cpp
+++ b/gtests/src/unit/tests/test_cluster.cpp
@@ -288,6 +288,39 @@ class ClusterUnitTest : public EventLoopTest {
};
};
+ class LocalDcClusterMetadataResolver : public ClusterMetadataResolver {
+ public:
+ LocalDcClusterMetadataResolver(const String& local_dc)
+ : desired_local_dc_(local_dc) {}
+
+ private:
+ virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) {
+ resolved_contact_points_ = contact_points;
+ local_dc_ = desired_local_dc_;
+ callback_(this);
+ }
+
+ virtual void internal_cancel() {}
+
+ private:
+ String desired_local_dc_;
+ };
+
+ class LocalDcClusterMetadataResolverFactory : public ClusterMetadataResolverFactory {
+ public:
+ LocalDcClusterMetadataResolverFactory(const String& local_dc)
+ : local_dc_(local_dc) {}
+
+ virtual ClusterMetadataResolver::Ptr new_instance(const ClusterSettings& settings) const {
+ return ClusterMetadataResolver::Ptr(new LocalDcClusterMetadataResolver(local_dc_));
+ }
+
+ virtual const char* name() const { return "LocalDc"; }
+
+ private:
+ String local_dc_;
+ };
+
static void on_connection_connected(ClusterConnector* connector, Future* future) {
if (connector->is_ok()) {
future->set();
@@ -329,10 +362,10 @@ TEST_F(ClusterUnitTest, Simple) {
mockssandra::SimpleCluster cluster(simple(), 3);
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
- contact_points.push_back("127.0.0.2");
- contact_points.push_back("127.0.0.3");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
+ contact_points.push_back(Address("127.0.0.2", 9042));
+ contact_points.push_back(Address("127.0.0.3", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -363,13 +396,13 @@ TEST_F(ClusterUnitTest, SimpleWithCriticalFailures) {
.then(mockssandra::Action::Builder().plaintext_auth())
.auth_success();
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1"); // Good
- contact_points.push_back("127.0.0.2"); // Invalid auth
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042)); // Good
+ contact_points.push_back(Address("127.0.0.2", 9042)); // Invalid auth
add_logging_critera("Unable to connect to host 127.0.0.2 because of the "
"following error: Received error response 'Invalid "
"credentials'");
- contact_points.push_back("127.0.0.3"); // Invalid protocol
+ contact_points.push_back(Address("127.0.0.3", 9042)); // Invalid protocol
add_logging_critera("Unable to connect to host 127.0.0.3 because of the "
"following error: Received error response 'Invalid or "
"unsupported protocol version'");
@@ -399,8 +432,8 @@ TEST_F(ClusterUnitTest, Resolve) {
mockssandra::SimpleCluster cluster(simple(), 3);
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("localhost");
+ AddressVec contact_points;
+ contact_points.push_back(Address("localhost", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -416,8 +449,8 @@ TEST_F(ClusterUnitTest, Auth) {
mockssandra::SimpleCluster cluster(auth());
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -440,8 +473,8 @@ TEST_F(ClusterUnitTest, Ssl) {
settings.control_connection_settings.connection_settings = use_ssl(&cluster);
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -461,10 +494,10 @@ TEST_F(ClusterUnitTest, Cancel) {
Vector connect_futures;
Vector connectors;
- ContactPointList contact_points;
- contact_points.push_back("localhost");
- contact_points.push_back("google.com");
- contact_points.push_back("doesnotexist.dne");
+ AddressVec contact_points;
+ contact_points.push_back(Address("localhost", 9042));
+ contact_points.push_back(Address("google.com", 9042));
+ contact_points.push_back(Address("doesnotexist.dne", 9042));
for (size_t i = 0; i < 10; ++i) {
Future::Ptr connect_future(new Future());
@@ -507,8 +540,8 @@ TEST_F(ClusterUnitTest, ReconnectToDiscoveredHosts) {
outage_plan.start_node(1);
outage_plan.stop_node(3);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr close_future(new Future());
Future::Ptr connect_future(new Future());
@@ -550,8 +583,8 @@ TEST_F(ClusterUnitTest, ReconnectUpdateHosts) {
outage_plan.stop_node(3);
outage_plan.stop_node(1);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr close_future(new Future());
Future::Ptr connect_future(new Future());
@@ -590,8 +623,8 @@ TEST_F(ClusterUnitTest, CloseDuringReconnect) {
mockssandra::SimpleCluster mock_cluster(simple());
mock_cluster.start_all();
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr close_future(new Future());
Future::Ptr connect_future(new Future());
@@ -623,8 +656,8 @@ TEST_F(ClusterUnitTest, NotifyDownUp) {
mockssandra::SimpleCluster mock_cluster(simple(), 3);
mock_cluster.start_all();
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr close_future(new Future());
Future::Ptr connect_future(new Future());
@@ -666,8 +699,8 @@ TEST_F(ClusterUnitTest, ProtocolNegotiation) {
mockssandra::SimpleCluster cluster(builder.build());
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -688,8 +721,8 @@ TEST_F(ClusterUnitTest, NoSupportedProtocols) {
mockssandra::SimpleCluster cluster(builder.build());
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -707,10 +740,10 @@ TEST_F(ClusterUnitTest, FindValidHost) {
mockssandra::SimpleCluster cluster(simple(), 3);
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.99.99.1"); // Invalid
- contact_points.push_back("127.99.99.2"); // Invalid
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.99.99.1", 9042)); // Invalid
+ contact_points.push_back(Address("127.99.99.2", 9042)); // Invalid
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -730,10 +763,10 @@ TEST_F(ClusterUnitTest, NoHostsAvailable) {
// Don't start the cluster
// Try multiple hosts
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
- contact_points.push_back("127.0.0.2");
- contact_points.push_back("127.0.0.3");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
+ contact_points.push_back(Address("127.0.0.2", 9042));
+ contact_points.push_back(Address("127.0.0.3", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -751,8 +784,8 @@ TEST_F(ClusterUnitTest, InvalidAuth) {
mockssandra::SimpleCluster cluster(auth());
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -775,8 +808,8 @@ TEST_F(ClusterUnitTest, InvalidSsl) {
use_ssl(&cluster);
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -803,8 +836,8 @@ TEST_F(ClusterUnitTest, DCAwareRecoverOnRemoteHost) {
Address local_address("127.0.0.1", 9042);
Address remote_address("127.0.0.2", 9042);
- ContactPointList contact_points;
- contact_points.push_back(local_address.to_string());
+ AddressVec contact_points;
+ contact_points.push_back(local_address);
Future::Ptr close_future(new Future());
Future::Ptr connect_future(new Future());
@@ -858,8 +891,8 @@ TEST_F(ClusterUnitTest, InvalidDC) {
mockssandra::SimpleCluster cluster(simple());
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -886,8 +919,8 @@ TEST_F(ClusterUnitTest, DisableEventsOnStartup) {
mockssandra::SimpleCluster cluster(simple(), 2);
ASSERT_EQ(cluster.start_all(), 0);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(new Future());
ClusterConnector::Ptr connector(
@@ -929,8 +962,8 @@ TEST_F(ClusterUnitTest, ReconnectionPolicy) {
outage_plan.stop_node(1);
outage_plan.start_node(1);
- ContactPointList contact_points;
- contact_points.push_back("127.0.0.1");
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
Future::Ptr close_future(new Future());
Future::Ptr connect_future(new Future());
@@ -957,3 +990,80 @@ TEST_F(ClusterUnitTest, ReconnectionPolicy) {
EXPECT_GE(policy->scheduled_delay_count(), 2u);
EXPECT_EQ(3u, mock_cluster.connection_attempts(1)); // Includes initial connection attempt
}
+
+TEST_F(ClusterUnitTest, LocalDcFromResolver) {
+ mockssandra::SimpleCluster cluster(simple(), 1);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", 9042));
+
+ Future::Ptr connect_future(new Future());
+ ClusterConnector::Ptr connector(
+ new ClusterConnector(contact_points, PROTOCOL_VERSION,
+ bind_callback(on_connection_reconnect, connect_future.get())));
+
+ ClusterSettings settings;
+ settings.cluster_metadata_resolver_factory = ClusterMetadataResolverFactory::Ptr(
+ new LocalDcClusterMetadataResolverFactory("this_local_dc"));
+ connector->with_settings(settings)->connect(event_loop());
+
+ ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(connect_future->error());
+ ASSERT_EQ("this_local_dc", connect_future->cluster()->local_dc());
+}
+
+TEST_F(ClusterUnitTest, NoContactPoints) {
+ // No cluster needed
+
+ AddressVec contact_points; // Empty
+
+ Future::Ptr connect_future(new Future());
+ ClusterConnector::Ptr connector(
+ new ClusterConnector(contact_points, PROTOCOL_VERSION,
+ bind_callback(on_connection_connected, connect_future.get())));
+ connector->connect(event_loop());
+
+ ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME))
+ << "Timed out waiting for cluster to connect";
+ ASSERT_TRUE(connect_future->error());
+ EXPECT_EQ(connect_future->error()->code, CASS_ERROR_LIB_NO_HOSTS_AVAILABLE);
+}
+
+TEST_F(ClusterUnitTest, PortIsAssignedDuringConnection) {
+ mockssandra::SimpleCluster cluster(simple(), 1);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ AddressVec contact_points;
+ contact_points.push_back(Address("127.0.0.1", -1));
+
+ Future::Ptr connect_future(new Future());
+ ClusterConnector::Ptr connector(
+ new ClusterConnector(contact_points, PROTOCOL_VERSION,
+ bind_callback(on_connection_reconnect, connect_future.get())));
+
+ ClusterSettings settings; // Default port and metadata resolver
+ connector->with_settings(settings)->connect(event_loop());
+
+ ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(connect_future->error());
+}
+
+TEST_F(ClusterUnitTest, HostIsResolvedAndPortIsAssignedDuringConnection) {
+ mockssandra::SimpleCluster cluster(simple(), 1);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ AddressVec contact_points;
+ contact_points.push_back(Address("localhost", -1));
+
+ Future::Ptr connect_future(new Future());
+ ClusterConnector::Ptr connector(
+ new ClusterConnector(contact_points, PROTOCOL_VERSION,
+ bind_callback(on_connection_reconnect, connect_future.get())));
+
+ ClusterSettings settings; // Default port and metadata resolver
+ connector->with_settings(settings)->connect(event_loop());
+
+ ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(connect_future->error());
+}
diff --git a/gtests/src/unit/tests/test_connection.cpp b/gtests/src/unit/tests/test_connection.cpp
index 6f3d8b823..1f392faa1 100644
--- a/gtests/src/unit/tests/test_connection.cpp
+++ b/gtests/src/unit/tests/test_connection.cpp
@@ -303,7 +303,7 @@ TEST_F(ConnectionUnitTest, SslCancel) {
}
TEST_F(ConnectionUnitTest, Timeout) {
- mockssandra::RequestHandler::Builder builder;
+ mockssandra::SimpleRequestHandlerBuilder builder;
builder.on(mockssandra::OPCODE_STARTUP).no_result(); // Don't return a response
mockssandra::SimpleCluster cluster(builder.build());
ASSERT_EQ(cluster.start_all(), 0);
diff --git a/gtests/src/unit/tests/test_decoder.cpp b/gtests/src/unit/tests/test_decoder.cpp
index 33ae4f98f..c9a75f64b 100644
--- a/gtests/src/unit/tests/test_decoder.cpp
+++ b/gtests/src/unit/tests/test_decoder.cpp
@@ -740,7 +740,7 @@ TEST_F(DecoderUnitTest, DecodeStringMultiMap) {
0, 6, 80, 121, 116, 104, 111, 110, // Python
0, 4, 82, 117, 98, 121 }; // Ruby
TestDecoder decoder(input, 58);
- Map > value;
+ StringMultimap value;
// SUCCESS
ASSERT_TRUE(decoder.decode_string_multimap(value));
diff --git a/gtests/src/unit/tests/test_exec_profile.cpp b/gtests/src/unit/tests/test_exec_profile.cpp
index d4b9fdde9..85365ca8d 100644
--- a/gtests/src/unit/tests/test_exec_profile.cpp
+++ b/gtests/src/unit/tests/test_exec_profile.cpp
@@ -41,8 +41,8 @@ TEST(ExecutionProfileUnitTest, Consistency) {
Config copy_config = config.new_instance();
ExecutionProfile profile_lookup;
ASSERT_TRUE(execution_profile(copy_config, "profile", profile_lookup));
- ASSERT_EQ(CASS_DEFAULT_CONSISTENCY, profile_lookup.consistency());
- ASSERT_EQ(CASS_DEFAULT_CONSISTENCY, copy_config.default_profile().consistency());
+ ASSERT_EQ(CASS_CONSISTENCY_UNKNOWN, profile_lookup.consistency());
+ ASSERT_EQ(CASS_CONSISTENCY_UNKNOWN, copy_config.default_profile().consistency());
}
TEST(ExecutionProfileUnitTest, SerialConsistency) {
diff --git a/gtests/src/unit/tests/test_http_client.cpp b/gtests/src/unit/tests/test_http_client.cpp
new file mode 100644
index 000000000..aa6ef14f0
--- /dev/null
+++ b/gtests/src/unit/tests/test_http_client.cpp
@@ -0,0 +1,295 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "http_test.hpp"
+
+#include "driver_info.hpp"
+#include "http_client.hpp"
+
+using namespace datastax::internal;
+using datastax::internal::core::HttpClient;
+using datastax::internal::core::SocketSettings;
+using datastax::internal::core::SslContext;
+using datastax::internal::core::SslContextFactory;
+using mockssandra::Ssl;
+
+class HttpClientUnitTest : public HttpTest {
+public:
+ static void on_success_response(HttpClient* client, bool* flag) {
+ *flag = true;
+ EXPECT_TRUE(client->is_ok()) << "Failed to connect: " << client->error_message();
+ EXPECT_EQ("text/plain", client->content_type());
+ EXPECT_EQ(echo_response(), client->response_body());
+ }
+
+ static void on_failed_response(HttpClient* client, bool* flag) {
+ *flag = true;
+ EXPECT_FALSE(client->is_ok());
+ }
+
+ static void on_canceled(HttpClient* client, bool* flag) {
+ if (client->is_canceled()) {
+ *flag = true;
+ }
+ }
+
+private:
+ static String echo_response() {
+ OStringStream ss;
+
+ ss << "GET / HTTP/1.0\r\n"
+ << "Host: " HTTP_MOCK_SERVER_IP << ":" << HTTP_MOCK_SERVER_PORT << "\r\n"
+ << "User-Agent: cpp-driver/" << driver_version() << "\r\nAccept: */*\r\n\r\n";
+
+ return ss.str();
+ }
+};
+
+TEST_F(HttpClientUnitTest, Simple) {
+ start_http_server();
+
+ bool is_success = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/",
+ bind_callback(on_success_response, &is_success)));
+ client->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_success);
+
+ stop_http_server();
+}
+
+TEST_F(HttpClientUnitTest, Cancel) {
+ start_http_server();
+
+ Vector clients;
+
+ bool is_canceled = false;
+ for (size_t i = 0; i < 10; ++i) {
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/",
+ bind_callback(on_canceled, &is_canceled)));
+ client->request(loop());
+ clients.push_back(client);
+ }
+
+ Vector::iterator it = clients.begin();
+ while (it != clients.end()) {
+ (*it)->cancel();
+ uv_run(loop(), UV_RUN_NOWAIT);
+ it++;
+ }
+
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_canceled);
+}
+
+TEST_F(HttpClientUnitTest, CancelTimeout) {
+ set_close_connnection_after_request(false);
+ start_http_server();
+
+ Vector clients;
+
+ bool is_canceled = false;
+ for (size_t i = 0; i < 10; ++i) {
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT),
+ "/invalid", bind_callback(on_canceled, &is_canceled)));
+ client
+ ->with_request_timeout_ms(200) // Timeout quickly
+ ->request(loop());
+ clients.push_back(client);
+ }
+
+ Vector::iterator it = clients.begin();
+ while (it != clients.end()) {
+ (*it)->cancel();
+ uv_run(loop(), UV_RUN_NOWAIT);
+ it++;
+ }
+
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_canceled);
+
+ for (Vector::const_iterator it = clients.begin(), end = clients.end(); it != end;
+ ++it) {
+ const HttpClient::Ptr& client(*it);
+ if (!client->is_canceled()) {
+ EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_TIMEOUT);
+ EXPECT_EQ(client->status_code(), 404);
+ }
+ }
+}
+
+TEST_F(HttpClientUnitTest, InvalidHttpServer) {
+ bool is_failed = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/",
+ bind_callback(on_failed_response, &is_failed)));
+ client->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_failed);
+ EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_SOCKET);
+}
+
+TEST_F(HttpClientUnitTest, InvalidHttpServerResponse) {
+ enable_valid_response(false);
+ start_http_server();
+
+ bool is_failed = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/",
+ bind_callback(on_failed_response, &is_failed)));
+ client->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_failed);
+ EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_PARSING);
+
+ stop_http_server();
+}
+
+TEST_F(HttpClientUnitTest, InvalidPath) {
+ start_http_server();
+
+ bool is_failed = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT),
+ "/invalid", bind_callback(on_failed_response, &is_failed)));
+ client->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_failed);
+ EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_HTTP_STATUS);
+ EXPECT_EQ(client->status_code(), 404);
+
+ stop_http_server();
+}
+
+TEST_F(HttpClientUnitTest, Timeout) {
+ set_close_connnection_after_request(false);
+ start_http_server();
+
+ bool is_failed = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT),
+ "/invalid", bind_callback(on_failed_response, &is_failed)));
+ client
+ ->with_request_timeout_ms(200) // Timeout quickly
+ ->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_failed);
+ EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_TIMEOUT);
+ EXPECT_EQ(client->status_code(), 404);
+
+ stop_http_server();
+}
+
+#ifdef HAVE_OPENSSL
+TEST_F(HttpClientUnitTest, Ssl) {
+ SocketSettings settings = use_ssl();
+ start_http_server();
+
+ bool is_success = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/",
+ bind_callback(on_success_response, &is_success)));
+ client->with_settings(settings)->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_success);
+
+ stop_http_server();
+}
+
+TEST_F(HttpClientUnitTest, NoClientCertProvidedSsl) {
+ String ca_key = mockssandra::Ssl::generate_key();
+ String ca_cert = mockssandra::Ssl::generate_cert(ca_key, "CA");
+
+ use_ssl(ca_key, ca_cert, HTTP_MOCK_HOSTNAME);
+ start_http_server();
+
+ bool is_failed = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/",
+ bind_callback(on_failed_response, &is_failed)));
+
+ SslContext::Ptr ssl_context(SslContextFactory::create());
+
+ // No client certificate provided
+
+ ssl_context->add_trusted_cert(ca_cert.c_str(), ca_cert.size());
+
+ SocketSettings settings;
+ settings.ssl_context = ssl_context;
+ client->with_settings(settings)->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_failed);
+ EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_SOCKET);
+
+ stop_http_server();
+}
+
+TEST_F(HttpClientUnitTest, InvalidClientCertSsl) {
+ String ca_key = mockssandra::Ssl::generate_key();
+ String ca_cert = mockssandra::Ssl::generate_cert(ca_key, "CA");
+
+ String client_key = mockssandra::Ssl::generate_key();
+ String client_cert = mockssandra::Ssl::generate_cert(client_key, ""); // Self-signed
+
+ use_ssl(ca_key, ca_cert, HTTP_MOCK_HOSTNAME);
+ start_http_server();
+
+ bool is_failed = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/",
+ bind_callback(on_failed_response, &is_failed)));
+
+ SslContext::Ptr ssl_context(SslContextFactory::create());
+
+ ssl_context->set_cert(client_cert.c_str(), client_cert.size());
+ ssl_context->set_private_key(client_key.c_str(), client_key.size(), "",
+ 0); // No password expected for the private key
+ ssl_context->add_trusted_cert(ca_cert.c_str(), ca_cert.size());
+
+ SocketSettings settings;
+ settings.ssl_context = ssl_context;
+
+ client->with_settings(settings)->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_failed);
+ EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_SOCKET);
+
+ stop_http_server();
+}
+
+TEST_F(HttpClientUnitTest, InvalidClientSslNotConfigured) {
+ use_ssl();
+ start_http_server();
+
+ bool is_failed = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/",
+ bind_callback(on_failed_response, &is_failed)));
+ client->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_failed);
+ EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_CLOSED);
+
+ stop_http_server();
+}
+
+TEST_F(HttpClientUnitTest, InvalidServerSslNotConfigured) {
+ SocketSettings settings = use_ssl("127.0.0.1", false);
+ start_http_server();
+
+ bool is_failed = false;
+ HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/",
+ bind_callback(on_failed_response, &is_failed)));
+ client->with_settings(settings)->request(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+ EXPECT_TRUE(is_failed);
+ EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_SOCKET);
+
+ stop_http_server();
+}
+#endif
diff --git a/gtests/src/unit/tests/test_load_balancing.cpp b/gtests/src/unit/tests/test_load_balancing.cpp
index 31fdf2f75..48a597e9a 100644
--- a/gtests/src/unit/tests/test_load_balancing.cpp
+++ b/gtests/src/unit/tests/test_load_balancing.cpp
@@ -169,7 +169,7 @@ void test_dc_aware_policy(size_t local_count, size_t remote_count) {
populate_hosts(local_count, "rack", LOCAL_DC, &hosts);
populate_hosts(remote_count, "rack", REMOTE_DC, &hosts);
DCAwarePolicy policy(LOCAL_DC, remote_count, false);
- policy.init(SharedRefPtr(), hosts, NULL);
+ policy.init(SharedRefPtr(), hosts, NULL, "");
const size_t total_hosts = local_count + remote_count;
@@ -185,7 +185,7 @@ TEST(RoundRobinLoadBalancingUnitTest, Simple) {
populate_hosts(2, "rack", "dc", &hosts);
RoundRobinPolicy policy;
- policy.init(SharedRefPtr(), hosts, NULL);
+ policy.init(SharedRefPtr(), hosts, NULL, "");
// start on first elem
ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL));
@@ -207,7 +207,7 @@ TEST(RoundRobinLoadBalancingUnitTest, OnAdd) {
populate_hosts(2, "rack", "dc", &hosts);
RoundRobinPolicy policy;
- policy.init(SharedRefPtr(), hosts, NULL);
+ policy.init(SharedRefPtr(), hosts, NULL, "");
// baseline
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
@@ -230,7 +230,7 @@ TEST(RoundRobinLoadBalancingUnitTest, OnRemove) {
populate_hosts(3, "rack", "dc", &hosts);
RoundRobinPolicy policy;
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
SharedRefPtr<Host> host = hosts.begin()->second;
@@ -251,7 +251,7 @@ TEST(RoundRobinLoadBalancingUnitTest, OnUpAndDown) {
populate_hosts(3, "rack", "dc", &hosts);
RoundRobinPolicy policy;
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp_before1(policy.new_query_plan("ks", NULL, NULL));
ScopedPtr<QueryPlan> qp_before2(policy.new_query_plan("ks", NULL, NULL));
@@ -297,7 +297,7 @@ TEST(RoundRobinLoadBalancingUnitTest, VerifyEqualDistribution) {
populate_hosts(3, "rack", "dc", &hosts);
RoundRobinPolicy policy;
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
{ // All nodes
QueryCounts counts(run_policy(policy, 12));
@@ -338,7 +338,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, SomeDatacenterLocalUnspecified) {
h->set_rack_and_dc("", "");
DCAwarePolicy policy(LOCAL_DC, 1, false);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
@@ -353,7 +353,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, SingleLocalDown) {
populate_hosts(1, "rack", REMOTE_DC, &hosts);
DCAwarePolicy policy(LOCAL_DC, 1, false);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp_before(
policy.new_query_plan("ks", NULL, NULL)); // has down host ptr in plan
@@ -380,7 +380,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, AllLocalRemovedReturned) {
populate_hosts(1, "rack", REMOTE_DC, &hosts);
DCAwarePolicy policy(LOCAL_DC, 1, false);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp_before(
policy.new_query_plan("ks", NULL, NULL)); // has down host ptr in plan
@@ -412,7 +412,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, RemoteRemovedReturned) {
SharedRefPtr<Host> target_host = hosts[target_addr];
DCAwarePolicy policy(LOCAL_DC, 1, false);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp_before(
policy.new_query_plan("ks", NULL, NULL)); // has down host ptr in plan
@@ -443,7 +443,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, UsedHostsPerDatacenter) {
for (size_t used_hosts = 0; used_hosts < 4; ++used_hosts) {
DCAwarePolicy policy(LOCAL_DC, used_hosts, false);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
Vector seq;
@@ -476,7 +476,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, AllowRemoteDatacentersForLocalConsist
// Not allowing remote DCs for local CLs
bool allow_remote_dcs_for_local_cl = false;
DCAwarePolicy policy(LOCAL_DC, 3, !allow_remote_dcs_for_local_cl);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
// Set local CL
QueryRequest::Ptr request(new QueryRequest("", 0));
@@ -494,7 +494,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, AllowRemoteDatacentersForLocalConsist
// Allowing remote DCs for local CLs
bool allow_remote_dcs_for_local_cl = true;
DCAwarePolicy policy(LOCAL_DC, 3, !allow_remote_dcs_for_local_cl);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
// Set local CL
QueryRequest::Ptr request(new QueryRequest("", 0));
@@ -517,7 +517,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, StartWithEmptyLocalDatacenter) {
// Set local DC using connected host
{
DCAwarePolicy policy("", 0, false);
- policy.init(hosts[Address("2.0.0.0", 9042)], hosts, NULL);
+ policy.init(hosts[Address("2.0.0.0", 9042)], hosts, NULL, "");
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
const size_t seq[] = { 2, 3, 4 };
@@ -527,7 +527,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, StartWithEmptyLocalDatacenter) {
// Set local DC using first host with non-empty DC
{
DCAwarePolicy policy("", 0, false);
- policy.init(SharedRefPtr<Host>(new Host(Address("0.0.0.0", 9042))), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(new Host(Address("0.0.0.0", 9042))), hosts, NULL, "");
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
const size_t seq[] = { 1 };
@@ -547,7 +547,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, VerifyEqualDistributionLocalDc) {
populate_hosts(3, "rack", REMOTE_DC, &hosts);
DCAwarePolicy policy("", 0, false);
- policy.init(hosts.begin()->second, hosts, NULL);
+ policy.init(hosts.begin()->second, hosts, NULL, "");
{ // All local nodes
QueryCounts counts(run_policy(policy, 12));
@@ -590,7 +590,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, VerifyEqualDistributionRemoteDc) {
populate_hosts(3, "rack", REMOTE_DC, &hosts);
DCAwarePolicy policy("", 3, false); // Allow all remote DC nodes
- policy.init(hosts.begin()->second, hosts, NULL);
+ policy.init(hosts.begin()->second, hosts, NULL, "");
Host::Ptr remote_dc_node1;
{ // Mark down all local nodes
@@ -664,7 +664,7 @@ TEST(TokenAwareLoadBalancingUnitTest, Simple) {
token_map->build();
TokenAwarePolicy policy(new RoundRobinPolicy(), false);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
QueryRequest::Ptr request(new QueryRequest("", 1));
const char* value = "kjdfjkldsdjkl"; // hash: 9024137376112061887
@@ -737,7 +737,7 @@ TEST(TokenAwareLoadBalancingUnitTest, NetworkTopology) {
token_map->build();
TokenAwarePolicy policy(new DCAwarePolicy(LOCAL_DC, num_hosts / 2, false), false);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
QueryRequest::Ptr request(new QueryRequest("", 1));
const char* value = "abc"; // hash: -5434086359492102041
@@ -811,7 +811,7 @@ TEST(TokenAwareLoadBalancingUnitTest, ShuffleReplicas) {
HostVec not_shuffled;
{
TokenAwarePolicy policy(new RoundRobinPolicy(), false); // Not shuffled
- policy.init(SharedRefPtr<Host>(), hosts, &random);
+ policy.init(SharedRefPtr<Host>(), hosts, &random, "");
ScopedPtr<QueryPlan> qp1(policy.new_query_plan("test", request_handler.get(), token_map.get()));
for (int i = 0; i < num_hosts; ++i) {
not_shuffled.push_back(qp1->compute_next());
@@ -829,7 +829,7 @@ TEST(TokenAwareLoadBalancingUnitTest, ShuffleReplicas) {
// Verify that the shuffle setting does indeed shuffle the replicas
{
TokenAwarePolicy shuffle_policy(new RoundRobinPolicy(), true); // Shuffled
- shuffle_policy.init(SharedRefPtr<Host>(), hosts, &random);
+ shuffle_policy.init(SharedRefPtr<Host>(), hosts, &random, "");
HostVec shuffled_previous;
ScopedPtr<QueryPlan> qp(
@@ -927,7 +927,7 @@ TEST(LatencyAwareLoadBalancingUnitTest, Simple) {
HostMap hosts;
populate_hosts(num_hosts, "rack1", LOCAL_DC, &hosts);
LatencyAwarePolicy policy(new RoundRobinPolicy(), settings);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
// Record some latencies with 100 ns being the minimum
for (HostMap::iterator i = hosts.begin(); i != hosts.end(); ++i) {
@@ -989,7 +989,7 @@ TEST(LatencyAwareLoadBalancingUnitTest, MinAverageUnderMinMeasured) {
HostMap hosts;
populate_hosts(num_hosts, "rack1", LOCAL_DC, &hosts);
LatencyAwarePolicy policy(new RoundRobinPolicy(), settings);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
int count = 1;
for (HostMap::iterator i = hosts.begin(); i != hosts.end(); ++i) {
@@ -1023,7 +1023,7 @@ TEST(WhitelistLoadBalancingUnitTest, Hosts) {
whitelist_hosts.push_back("37.0.0.0");
whitelist_hosts.push_back("83.0.0.0");
WhitelistPolicy policy(new RoundRobinPolicy(), whitelist_hosts);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
@@ -1044,7 +1044,7 @@ TEST(WhitelistLoadBalancingUnitTest, Datacenters) {
whitelist_dcs.push_back(LOCAL_DC);
whitelist_dcs.push_back(REMOTE_DC);
WhitelistDCPolicy policy(new RoundRobinPolicy(), whitelist_dcs);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
@@ -1064,7 +1064,7 @@ TEST(BlacklistLoadBalancingUnitTest, Hosts) {
blacklist_hosts.push_back("2.0.0.0");
blacklist_hosts.push_back("3.0.0.0");
BlacklistPolicy policy(new RoundRobinPolicy(), blacklist_hosts);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
@@ -1085,7 +1085,7 @@ TEST(BlacklistLoadBalancingUnitTest, Datacenters) {
blacklist_dcs.push_back(LOCAL_DC);
blacklist_dcs.push_back(REMOTE_DC);
BlacklistDCPolicy policy(new RoundRobinPolicy(), blacklist_dcs);
- policy.init(SharedRefPtr<Host>(), hosts, NULL);
+ policy.init(SharedRefPtr<Host>(), hosts, NULL, "");
ScopedPtr<QueryPlan> qp(policy.new_query_plan("ks", NULL, NULL));
diff --git a/gtests/src/unit/tests/test_pool.cpp b/gtests/src/unit/tests/test_pool.cpp
index 79bd11504..6208ce962 100644
--- a/gtests/src/unit/tests/test_pool.cpp
+++ b/gtests/src/unit/tests/test_pool.cpp
@@ -70,9 +70,9 @@ class PoolUnitTest : public LoopTest {
: public RequestState
, public Status {
public:
- RequestStatus(uv_loop_t* loop, int num_nodes = NUM_NODES)
+ RequestStatus(uv_loop_t* loop, int num_requests = NUM_NODES)
: loop_(loop)
- , remaining_(num_nodes) {}
+ , remaining_(num_requests) {}
virtual void set(RequestState::Enum state) {
Status::set(state);
@@ -88,13 +88,13 @@ class PoolUnitTest : public LoopTest {
protected:
uv_loop_t* loop_;
- size_t remaining_;
+ int remaining_;
};
class RequestStatusWithManager : public RequestStatus {
public:
- RequestStatusWithManager(uv_loop_t* loop, int num_nodes = NUM_NODES)
- : RequestStatus(loop, num_nodes) {}
+ RequestStatusWithManager(uv_loop_t* loop, int num_requests = NUM_NODES)
+ : RequestStatus(loop, num_requests) {}
~RequestStatusWithManager() {
ConnectionPoolManager::Ptr temp(manager());
@@ -324,7 +324,7 @@ class PoolUnitTest : public LoopTest {
if (connection) {
RequestStatus status(manager->loop(), 1);
RequestCallback::Ptr callback(new RequestCallback(&status));
- EXPECT_TRUE(connection->write(callback.get()))
+ EXPECT_TRUE(connection->write(callback.get()) > 0)
<< "Unable to write request to connection " << address.to_string();
connection->flush(); // Flush requests to avoid unnecessary timeouts
uv_run(loop(), UV_RUN_DEFAULT);
@@ -344,7 +344,7 @@ class PoolUnitTest : public LoopTest {
PooledConnection::Ptr connection = manager->find_least_busy(generator.next());
if (connection) {
RequestCallback::Ptr callback(new RequestCallback(status));
- if (!connection->write(callback.get())) {
+ if (connection->write(callback.get()) < 0) {
status->error_failed_write();
}
} else {
@@ -354,6 +354,33 @@ class PoolUnitTest : public LoopTest {
}
}
+ static void on_pool_connected_exhaust_streams(ConnectionPoolManagerInitializer* initializer,
+ RequestStatusWithManager* status) {
+ const Address address("127.0.0.1", 9042);
+ ConnectionPoolManager::Ptr manager = initializer->release_manager();
+ status->set_manager(manager);
+
+ for (size_t i = 0; i < CASS_MAX_STREAMS; ++i) {
+ PooledConnection::Ptr connection = manager->find_least_busy(address);
+
+ if (connection) {
+ RequestCallback::Ptr callback(new RequestCallback(status));
+ if (connection->write(callback.get()) < 0) {
+ status->error_failed_write();
+ }
+ } else {
+ status->error_no_connection();
+ }
+ }
+
+ PooledConnection::Ptr connection = manager->find_least_busy(address);
+ ASSERT_TRUE(connection);
+ RequestCallback::Ptr callback(new RequestCallback(status));
+ EXPECT_EQ(connection->write(callback.get()), Request::REQUEST_ERROR_NO_AVAILABLE_STREAM_IDS);
+
+ manager->flush();
+ }
+
static void on_pool_nop(ConnectionPoolManagerInitializer* initializer,
RequestStatusWithManager* status) {
ConnectionPoolManager::Ptr manager = initializer->release_manager();
@@ -798,6 +825,17 @@ TEST_F(PoolUnitTest, PartialReconnect) {
// TODO:
}
-TEST_F(PoolUnitTest, LowNumberOfStreams) {
- // TODO:
+TEST_F(PoolUnitTest, NoAvailableStreams) {
+ mockssandra::SimpleCluster cluster(simple(), 1);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ RequestStatusWithManager status(loop(), CASS_MAX_STREAMS);
+
+ ConnectionPoolManagerInitializer::Ptr initializer(new ConnectionPoolManagerInitializer(
+ PROTOCOL_VERSION, bind_callback(on_pool_connected_exhaust_streams, &status)));
+
+ initializer->initialize(loop(), hosts());
+ uv_run(loop(), UV_RUN_DEFAULT);
+
+ EXPECT_EQ(status.count(RequestStatus::SUCCESS), CASS_MAX_STREAMS) << status.results();
}
diff --git a/gtests/src/unit/tests/test_request_processor.cpp b/gtests/src/unit/tests/test_request_processor.cpp
index 80c83ca9b..d1900397d 100644
--- a/gtests/src/unit/tests/test_request_processor.cpp
+++ b/gtests/src/unit/tests/test_request_processor.cpp
@@ -27,19 +27,91 @@
using namespace datastax::internal;
using namespace datastax::internal::core;
+class InorderLoadBalancingPolicy : public LoadBalancingPolicy {
+public:
+ typedef SharedRefPtr<InorderLoadBalancingPolicy> Ptr;
+ typedef Vector Vec;
+
+ InorderLoadBalancingPolicy()
+ : LoadBalancingPolicy()
+ , hosts_(new HostVec()) {}
+
+ virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random,
+ const String& local_dc) {
+ hosts_->reserve(hosts.size());
+ std::transform(hosts.begin(), hosts.end(), std::back_inserter(*hosts_), GetHost());
+ }
+
+ virtual CassHostDistance distance(const Host::Ptr& host) const {
+ return CASS_HOST_DISTANCE_LOCAL;
+ }
+
+ virtual bool is_host_up(const Address& address) const {
+ return std::find_if(hosts_->begin(), hosts_->end(), FindAddress(address)) != hosts_->end();
+ }
+
+ virtual void on_host_added(const Host::Ptr& host) { add_host(hosts_, host); }
+
+ virtual void on_host_removed(const Host::Ptr& host) { remove_host(hosts_, host); }
+
+ virtual void on_host_up(const Host::Ptr& host) { add_host(hosts_, host); }
+
+ virtual void on_host_down(const Address& address) { remove_host(hosts_, address); }
+
+ virtual QueryPlan* new_query_plan(const String& keyspace, RequestHandler* request_handler,
+ const TokenMap* token_map) {
+ return new InternalQueryPlan(hosts_);
+ }
+
+ virtual LoadBalancingPolicy* new_instance() { return new InorderLoadBalancingPolicy(); }
+
+private:
+ struct FindAddress {
+
+ FindAddress(const Address& address)
+ : address(address) {}
+
+ bool operator()(const Host::Ptr& host) const { return host->address() == address; }
+
+ Address address;
+ };
+
+ class InternalQueryPlan : public datastax::internal::core::QueryPlan {
+ public:
+ InternalQueryPlan(const CopyOnWriteHostVec& hosts)
+ : index_(0)
+ , hosts_(hosts) {}
+
+ virtual Host::Ptr compute_next() {
+ if (index_ < hosts_->size()) {
+ return (*hosts_)[index_++];
+ }
+ return Host::Ptr();
+ }
+
+ private:
+ size_t index_;
+ CopyOnWriteHostVec hosts_;
+ };
+
+private:
+ CopyOnWriteHostVec hosts_;
+};
+
class RequestProcessorUnitTest : public EventLoopTest {
public:
RequestProcessorUnitTest()
: EventLoopTest("RequestProcessorUnitTest") {}
- HostMap generate_hosts() {
+ HostMap generate_hosts(size_t num_hosts = 3) {
HostMap hosts;
- Host::Ptr host1(new Host(Address("127.0.0.1", PORT)));
- Host::Ptr host2(new Host(Address("127.0.0.2", PORT)));
- Host::Ptr host3(new Host(Address("127.0.0.3", PORT)));
- hosts[host1->address()] = host1;
- hosts[host2->address()] = host2;
- hosts[host3->address()] = host3;
+ num_hosts = std::min(num_hosts, static_cast<size_t>(255));
+ for (size_t i = 1; i <= num_hosts; ++i) {
+ char buf[64];
+ sprintf(buf, "127.0.0.%d", static_cast<int>(i));
+ Host::Ptr host(new Host(Address(buf, PORT)));
+ hosts[host->address()] = host;
+ }
return hosts;
}
@@ -205,7 +277,7 @@ TEST_F(RequestProcessorUnitTest, Simple) {
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
initializer->initialize(event_loop());
@@ -224,7 +296,7 @@ TEST_F(RequestProcessorUnitTest, CloseWithRequestsPending) {
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
initializer->initialize(event_loop());
@@ -262,7 +334,7 @@ TEST_F(RequestProcessorUnitTest, Auth) {
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
RequestProcessorSettings settings;
@@ -287,7 +359,7 @@ TEST_F(RequestProcessorUnitTest, Ssl) {
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
initializer->with_settings(settings)->initialize(event_loop());
@@ -311,7 +383,7 @@ TEST_F(RequestProcessorUnitTest, NotifyAddRemoveHost) {
Future::Ptr up_future(new Future());
Future::Ptr down_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
RequestProcessorSettings settings;
@@ -343,7 +415,7 @@ TEST_F(RequestProcessorUnitTest, CloseDuringReconnect) {
Future::Ptr close_future(new Future());
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
RequestProcessorSettings settings;
@@ -378,7 +450,7 @@ TEST_F(RequestProcessorUnitTest, CloseDuringAddNewHost) {
Future::Ptr close_future(new Future());
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
CloseListener::Ptr listener(new CloseListener(close_future));
@@ -408,7 +480,7 @@ TEST_F(RequestProcessorUnitTest, PoolDown) {
Future::Ptr up_future(new Future());
Future::Ptr down_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
UpDownListener::Ptr listener(new UpDownListener(up_future, down_future, target_host));
@@ -438,7 +510,7 @@ TEST_F(RequestProcessorUnitTest, PoolUp) {
Future::Ptr up_future(new Future());
Future::Ptr down_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
RequestProcessorSettings settings;
@@ -466,7 +538,7 @@ TEST_F(RequestProcessorUnitTest, InvalidAuth) {
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
RequestProcessorSettings settings;
@@ -492,7 +564,7 @@ TEST_F(RequestProcessorUnitTest, InvalidSsl) {
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
SslContext::Ptr ssl_context(SslContextFactory::create()); // No trusted cert
@@ -528,7 +600,7 @@ TEST_F(RequestProcessorUnitTest, RollingRestart) {
HostMap hosts(generate_hosts());
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
RequestProcessorSettings settings;
@@ -560,7 +632,7 @@ TEST_F(RequestProcessorUnitTest, NoHostsAvailable) {
HostMap hosts(generate_hosts());
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
initializer->with_listener(listener.get())->initialize(event_loop());
@@ -596,7 +668,7 @@ TEST_F(RequestProcessorUnitTest, RequestTimeout) {
HostMap hosts(generate_hosts());
Future::Ptr connect_future(new Future());
RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
- hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(),
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
bind_callback(on_connected, connect_future.get())));
initializer->with_listener(listener.get())->initialize(event_loop());
@@ -619,3 +691,73 @@ TEST_F(RequestProcessorUnitTest, RequestTimeout) {
processor->close();
ASSERT_TRUE(close_future->wait_for(WAIT_FOR_TIME));
}
+
+TEST_F(RequestProcessorUnitTest, LowNumberOfStreams) {
+ mockssandra::SimpleRequestHandlerBuilder builder;
+ builder.on(mockssandra::OPCODE_QUERY)
+ .wait(1000) // Give time for the streams to run out
+ .system_local()
+ .system_peers()
+ .empty_rows_result(1);
+ mockssandra::SimpleCluster cluster(builder.build(), 2); // Two node cluster
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ Future::Ptr close_future(new Future());
+ CloseListener::Ptr listener(new CloseListener(close_future));
+
+ HostMap hosts(generate_hosts(2));
+ Future::Ptr connect_future(new Future());
+
+ ExecutionProfile profile;
+ profile.set_load_balancing_policy(new InorderLoadBalancingPolicy());
+ profile.set_speculative_execution_policy(new NoSpeculativeExecutionPolicy());
+ profile.set_retry_policy(new DefaultRetryPolicy());
+
+ RequestProcessorSettings settings;
+ settings.default_profile = profile;
+ settings.request_queue_size = 2 * CASS_MAX_STREAMS + 1; // Create a request queue with enough room
+
+ RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
+ hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
+ bind_callback(on_connected, connect_future.get())));
+ initializer->with_settings(settings)->with_listener(listener.get())->initialize(event_loop());
+
+ ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(connect_future->error());
+ RequestProcessor::Ptr processor(connect_future->processor());
+
+ // Saturate the hosts connections, but leave one stream.
+ for (int i = 0; i < 2 * CASS_MAX_STREAMS - 1; ++i) {
+ ResponseFuture::Ptr response_future(new ResponseFuture());
+ Statement::Ptr request(new QueryRequest("SELECT * FROM table"));
+ RequestHandler::Ptr request_handler(new RequestHandler(request, response_future));
+ processor->process_request(request_handler);
+ }
+
+ { // Try two more requests. One should succeed on "127.0.0.2" and the other should fail (out of
+ // streams).
+ ResponseFuture::Ptr response_future(new ResponseFuture());
+
+ Statement::Ptr request(new QueryRequest("SELECT * FROM table"));
+ request->set_record_attempted_addresses(true);
+ RequestHandler::Ptr request_handler(new RequestHandler(request, response_future));
+ processor->process_request(request_handler);
+
+ ResponseFuture::Ptr response_future_fail(new ResponseFuture());
+ RequestHandler::Ptr request_handler_fail(new RequestHandler(
+ Statement::Ptr(new QueryRequest("SELECT * FROM table")), response_future_fail));
+ processor->process_request(request_handler_fail);
+ ASSERT_TRUE(response_future_fail->wait_for(WAIT_FOR_TIME));
+ ASSERT_TRUE(response_future_fail->error());
+ EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, response_future_fail->error()->code);
+
+ ASSERT_TRUE(response_future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(response_future->error());
+ AddressVec attempted = response_future->attempted_addresses();
+ ASSERT_GE(attempted.size(), 1u);
+ EXPECT_EQ(attempted[0], Address("127.0.0.2", PORT));
+ }
+
+ processor->close();
+ ASSERT_TRUE(close_future->wait_for(WAIT_FOR_TIME));
+}
diff --git a/gtests/src/unit/tests/test_session.cpp b/gtests/src/unit/tests/test_session.cpp
index 6693183cb..091b8c36e 100644
--- a/gtests/src/unit/tests/test_session.cpp
+++ b/gtests/src/unit/tests/test_session.cpp
@@ -47,10 +47,14 @@ class SessionUnitTest : public EventLoopTest {
outage_plan->stop_node(1, OUTAGE_PLAN_DELAY);
}
- void query_on_threads(Session* session) {
+ void query_on_threads(Session* session, bool is_chaotic = false) {
uv_thread_t threads[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; ++i) {
- ASSERT_EQ(0, uv_thread_create(&threads[i], query, session));
+ if (is_chaotic) {
+ ASSERT_EQ(0, uv_thread_create(&threads[i], query_is_chaotic, session));
+ } else {
+ ASSERT_EQ(0, uv_thread_create(&threads[i], query, session));
+ }
}
for (int i = 0; i < NUM_THREADS; ++i) {
uv_thread_join(&threads[i]);
@@ -73,7 +77,7 @@ class SessionUnitTest : public EventLoopTest {
for (size_t i = 1; i <= num_nodes; ++i) {
OStringStream ss;
ss << "127.0.0." << i;
- config.contact_points().push_back(ss.str());
+ config.contact_points().push_back(Address(ss.str(), 9042));
}
if (ssl_context) {
config.set_ssl_context(ssl_context);
@@ -89,14 +93,21 @@ class SessionUnitTest : public EventLoopTest {
<< cass_error_desc(close_future->error()->code) << ": " << close_future->error()->message;
}
- static void query(Session* session) {
+ static void query(Session* session, bool is_chaotic = false) {
QueryRequest::Ptr request(new QueryRequest("blah", 0));
request->set_is_idempotent(true);
Future::Ptr future = session->execute(request, NULL);
ASSERT_TRUE(future->wait_for(WAIT_FOR_TIME)) << "Timed out executing query";
- ASSERT_FALSE(future->error()) << cass_error_desc(future->error()->code) << ": "
- << future->error()->message;
+ if (future->error()) fprintf(stderr, "%s\n", cass_error_desc(future->error()->code));
+ if (is_chaotic) {
+ ASSERT_TRUE(future->error() == NULL ||
+ future->error()->code == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE)
+ << cass_error_desc(future->error()->code) << ": " << future->error()->message;
+ } else {
+ ASSERT_FALSE(future->error())
+ << cass_error_desc(future->error()->code) << ": " << future->error()->message;
+ }
}
// uv_thread_create
@@ -104,6 +115,25 @@ class SessionUnitTest : public EventLoopTest {
Session* session = static_cast<Session*>(arg);
query(session);
}
+ static void query_is_chaotic(void* arg) {
+ Session* session = static_cast<Session*>(arg);
+ query(session, true);
+ }
+
+ bool check_consistency(const Session& session, CassConsistency expected_consistency,
+ CassConsistency expected_profile_consistency) {
+ Config session_config = session.config();
+ EXPECT_EQ(expected_consistency, session_config.consistency());
+
+ const ExecutionProfile::Map& profiles = session_config.profiles();
+ for (ExecutionProfile::Map::const_iterator it = profiles.begin(), end = profiles.end();
+ it != end; ++it) {
+ if (expected_profile_consistency != it->second.consistency()) {
+ return false;
+ }
+ }
+ return true;
+ }
class HostEventFuture : public Future {
public:
@@ -195,6 +225,54 @@ class SessionUnitTest : public EventLoopTest {
uv_mutex_t mutex_;
EventQueue events_;
};
+
+ class LocalDcClusterMetadataResolver : public ClusterMetadataResolver {
+ public:
+ LocalDcClusterMetadataResolver(const String& local_dc)
+ : desired_local_dc_(local_dc) {}
+
+ private:
+ virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) {
+ resolved_contact_points_ = contact_points;
+ local_dc_ = desired_local_dc_;
+ callback_(this);
+ }
+
+ virtual void internal_cancel() {}
+
+ private:
+ String desired_local_dc_;
+ };
+
+ class LocalDcClusterMetadataResolverFactory : public ClusterMetadataResolverFactory {
+ public:
+ LocalDcClusterMetadataResolverFactory(const String& local_dc)
+ : local_dc_(local_dc) {}
+
+ virtual ClusterMetadataResolver::Ptr new_instance(const ClusterSettings& settings) const {
+ return ClusterMetadataResolver::Ptr(new LocalDcClusterMetadataResolver(local_dc_));
+ }
+
+ virtual const char* name() const { return "LocalDc"; }
+
+ private:
+ String local_dc_;
+ };
+
+ class SupportedDbaasOptions : public mockssandra::Action {
+ public:
+ virtual void on_run(mockssandra::Request* request) const {
+ Vector<String> product_type;
+ product_type.push_back("DATASTAX_APOLLO");
+
+ StringMultimap supported;
+ supported["PRODUCT_TYPE"] = product_type;
+
+ String body;
+ mockssandra::encode_string_map(supported, &body);
+ request->write(mockssandra::OPCODE_SUPPORTED, body);
+ }
+ };
};
TEST_F(SessionUnitTest, ExecuteQueryNotConnected) {
@@ -216,7 +294,7 @@ TEST_F(SessionUnitTest, InvalidKeyspace) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
Session session;
Future::Ptr connect_future(session.connect(config, "invalid"));
@@ -231,7 +309,7 @@ TEST_F(SessionUnitTest, InvalidDataCenter) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
config.set_load_balancing_policy(new DCAwarePolicy("invalid_data_center", 0, false));
Session session;
@@ -248,7 +326,7 @@ TEST_F(SessionUnitTest, InvalidLocalAddress) {
Config config;
config.set_local_address(Address("1.1.1.1", PORT)); // Invalid
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
config.set_load_balancing_policy(new DCAwarePolicy("invalid_data_center", 0, false));
Session session;
@@ -295,7 +373,7 @@ TEST_F(SessionUnitTest, ExecuteQueryReusingSessionChaotic) {
Future::Ptr outage_future = execute_outage_plan(&outage_plan);
while (!outage_future->wait_for(1000)) { // 1 millisecond wait
connect(&session, NULL, WAIT_FOR_TIME * 3, 4);
- query(&session);
+ query(&session, true);
close(&session, WAIT_FOR_TIME * 3);
}
}
@@ -312,7 +390,7 @@ TEST_F(SessionUnitTest, ExecuteQueryReusingSessionUsingSslChaotic) {
Future::Ptr outage_future = execute_outage_plan(&outage_plan);
while (!outage_future->wait_for(1000)) { // 1 millisecond wait
connect(&session, ssl_context.get(), WAIT_FOR_TIME * 3, 4);
- query(&session);
+ query(&session, true);
close(&session, WAIT_FOR_TIME * 3);
}
}
@@ -360,7 +438,8 @@ TEST_F(SessionUnitTest, ExecuteQueryWithCompleteOutageSpinDown) {
QueryRequest::Ptr request(new QueryRequest("blah", 0));
Future::Ptr future = session.execute(request, NULL);
ASSERT_TRUE(future->wait_for(WAIT_FOR_TIME));
- ASSERT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, future->error()->code);
+ EXPECT_TRUE(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE == future->error()->code ||
+ CASS_ERROR_LIB_REQUEST_TIMED_OUT == future->error()->code);
// Restart a node and execute query to ensure session recovers
ASSERT_EQ(cluster.start(2), 0);
@@ -403,7 +482,7 @@ TEST_F(SessionUnitTest, ExecuteQueryWithThreadsChaotic) {
Future::Ptr outage_future = execute_outage_plan(&outage_plan);
while (!outage_future->wait_for(1000)) { // 1 millisecond wait
- query_on_threads(&session);
+ query_on_threads(&session, true);
}
close(&session);
@@ -422,7 +501,7 @@ TEST_F(SessionUnitTest, ExecuteQueryWithThreadsUsingSslChaotic) {
Future::Ptr outage_future = execute_outage_plan(&outage_plan);
while (!outage_future->wait_for(1000)) { // 1 millisecond wait
- query_on_threads(&session);
+ query_on_threads(&session, true);
}
close(&session);
@@ -436,7 +515,7 @@ TEST_F(SessionUnitTest, HostListener) {
Config config;
config.set_constant_reconnect(100); // Reconnect immediately
- config.contact_points().push_back("127.0.0.2");
+ config.contact_points().push_back(Address("127.0.0.2", 9042));
config.set_host_listener(listener);
Session session;
@@ -494,7 +573,7 @@ TEST_F(SessionUnitTest, HostListenerDCAwareLocal) {
Config config;
config.set_constant_reconnect(100); // Reconnect immediately
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
config.set_host_listener(listener);
Session session;
@@ -531,7 +610,7 @@ TEST_F(SessionUnitTest, HostListenerDCAwareRemote) {
Config config;
config.set_constant_reconnect(100); // Reconnect immediately
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
config.set_load_balancing_policy(new DCAwarePolicy("dc1", 1, false));
config.set_host_listener(listener);
@@ -573,7 +652,7 @@ TEST_F(SessionUnitTest, HostListenerNodeDown) {
Config config;
config.set_constant_reconnect(100); // Reconnect immediately
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
config.set_host_listener(listener);
Session session;
@@ -609,3 +688,298 @@ TEST_F(SessionUnitTest, HostListenerNodeDown) {
ASSERT_EQ(0u, listener->event_count());
}
+
+TEST_F(SessionUnitTest, LocalDcUpdatedOnPolicy) {
+ mockssandra::SimpleCluster cluster(simple(), 3, 1);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ TestHostListener::Ptr listener(new TestHostListener());
+
+ Config config;
+ config.contact_points().push_back(Address("127.0.0.4", 9042));
+ config.set_cluster_metadata_resolver_factory(
+ ClusterMetadataResolverFactory::Ptr(new LocalDcClusterMetadataResolverFactory("dc2")));
+ config.set_host_listener(listener);
+
+ Session session;
+ connect(config, &session);
+
+ { // Initial nodes available from peers table (should skip DC1)
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.4", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.4", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ }
+
+ for (int i = 0; i < 20; ++i) { // Validate the request processors are using DC2 only
+ QueryRequest::Ptr request(new QueryRequest("blah", 0));
+
+ ResponseFuture::Ptr future = session.execute(request, NULL);
+ EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(future->error());
+ EXPECT_EQ("127.0.0.4", future->address().to_string());
+ }
+
+ close(&session);
+
+ ASSERT_EQ(0u, listener->event_count());
+}
+
+TEST_F(SessionUnitTest, LocalDcNotOverriddenOnPolicy) {
+ mockssandra::SimpleCluster cluster(simple(), 1, 3);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ TestHostListener::Ptr listener(new TestHostListener());
+
+ Config config;
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
+ config.set_load_balancing_policy(new DCAwarePolicy("dc1"));
+ config.set_cluster_metadata_resolver_factory(
+ ClusterMetadataResolverFactory::Ptr(new LocalDcClusterMetadataResolverFactory("dc2")));
+ config.set_host_listener(listener);
+
+ Session session;
+ connect(config, &session);
+
+ { // Initial nodes available from peers table (should be DC1)
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.1", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.1", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ }
+
+ for (int i = 0; i < 20; ++i) { // Validate the request processors are using DC1 only
+ QueryRequest::Ptr request(new QueryRequest("blah", 0));
+
+ ResponseFuture::Ptr future = session.execute(request, NULL);
+ EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(future->error());
+ EXPECT_EQ("127.0.0.1", future->address().to_string());
+ }
+
+ close(&session);
+
+ ASSERT_EQ(0u, listener->event_count());
+}
+
+TEST_F(SessionUnitTest, LocalDcOverriddenOnPolicyUsingExecutionProfiles) {
+ mockssandra::SimpleCluster cluster(simple(), 3, 1);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ TestHostListener::Ptr listener(new TestHostListener());
+
+ Config config;
+ config.contact_points().push_back(Address("127.0.0.4", 9042));
+ config.set_use_randomized_contact_points(
+ false); // Ensure round robin order over DC for query execution
+ config.set_cluster_metadata_resolver_factory(
+ ClusterMetadataResolverFactory::Ptr(new LocalDcClusterMetadataResolverFactory("dc2")));
+ config.set_host_listener(listener);
+
+ ExecutionProfile profile;
+ profile.set_load_balancing_policy(new DCAwarePolicy());
+ config.set_execution_profile("use_propagated_local_dc", &profile);
+
+ Session session;
+ connect(config, &session);
+
+ { // Initial nodes available from peers table (should be DC2)
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.4", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.4", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ }
+
+ for (int i = 0; i < 20; ++i) { // Validate the default profile is using DC2 only
+ QueryRequest::Ptr request(new QueryRequest("blah", 0));
+
+ ResponseFuture::Ptr future = session.execute(request, NULL);
+ EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(future->error());
+ EXPECT_EQ("127.0.0.4", future->address().to_string());
+ }
+
+  for (int i = 0; i < 20; ++i) { // Validate the 'use_propagated_local_dc' profile is using DC2 only
+ QueryRequest::Ptr request(new QueryRequest("blah", 0));
+ request->set_execution_profile_name("use_propagated_local_dc");
+
+ ResponseFuture::Ptr future = session.execute(request, NULL);
+ EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(future->error());
+ EXPECT_EQ("127.0.0.4", future->address().to_string());
+ }
+
+ close(&session);
+
+ ASSERT_EQ(0u, listener->event_count());
+}
+
+TEST_F(SessionUnitTest, LocalDcNotOverriddenOnPolicyUsingExecutionProfiles) {
+ mockssandra::SimpleCluster cluster(simple(), 3, 1);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ TestHostListener::Ptr listener(new TestHostListener());
+
+ Config config;
+ config.contact_points().push_back(Address("127.0.0.4", 9042));
+ config.set_use_randomized_contact_points(
+ false); // Ensure round robin order over DC for query execution
+ config.set_cluster_metadata_resolver_factory(
+ ClusterMetadataResolverFactory::Ptr(new LocalDcClusterMetadataResolverFactory("dc2")));
+ config.set_host_listener(listener);
+
+ ExecutionProfile profile;
+ profile.set_load_balancing_policy(new DCAwarePolicy("dc1"));
+ config.set_execution_profile("use_dc1", &profile);
+
+ Session session;
+ connect(config, &session);
+
+ { // Initial nodes available from peers table (should be DC1 and DC2)
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.1", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.1", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.2", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.2", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.3", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.3", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.4", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.4", 9042)),
+ listener->wait_for_event(WAIT_FOR_TIME));
+ }
+
+ for (int i = 0; i < 20; ++i) { // Validate the default profile is using DC2 only
+ QueryRequest::Ptr request(new QueryRequest("blah", 0));
+
+ ResponseFuture::Ptr future = session.execute(request, NULL);
+ EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(future->error());
+ EXPECT_EQ("127.0.0.4", future->address().to_string());
+ }
+
+  for (int i = 0; i < 20; ++i) { // Validate the 'use_dc1' profile is using DC1 only
+ QueryRequest::Ptr request(new QueryRequest("blah", 0));
+ request->set_execution_profile_name("use_dc1");
+
+ ResponseFuture::Ptr future = session.execute(request, NULL);
+ EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME));
+ EXPECT_FALSE(future->error());
+ EXPECT_NE("127.0.0.4", future->address().to_string());
+ }
+
+ close(&session);
+
+ ASSERT_EQ(0u, listener->event_count());
+}
+
+TEST_F(SessionUnitTest, NoContactPoints) {
+ // No cluster needed
+
+ Config config;
+ config.contact_points().clear();
+
+ Session session;
+ Future::Ptr connect_future(session.connect(config));
+ ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME))
+ << "Timed out waiting for session to connect";
+ ASSERT_TRUE(connect_future->error());
+ EXPECT_EQ(connect_future->error()->code, CASS_ERROR_LIB_NO_HOSTS_AVAILABLE);
+}
+
+TEST_F(SessionUnitTest, DefaultConsistency) {
+ mockssandra::SimpleCluster cluster(simple());
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ Session session;
+ {
+ Config session_config = session.config();
+ EXPECT_EQ(CASS_CONSISTENCY_UNKNOWN, session_config.consistency());
+ }
+
+ ExecutionProfile profile;
+ Config config;
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
+ config.set_execution_profile("profile", &profile);
+ connect(config, &session);
+
+ EXPECT_TRUE(check_consistency(session, CASS_DEFAULT_CONSISTENCY, CASS_DEFAULT_CONSISTENCY));
+
+ close(&session);
+}
+
+TEST_F(SessionUnitTest, DefaultConsistencyExecutionProfileNotUpdated) {
+ mockssandra::SimpleCluster cluster(simple());
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ Session session;
+ {
+ Config session_config = session.config();
+ EXPECT_EQ(CASS_CONSISTENCY_UNKNOWN, session_config.consistency());
+ }
+
+ ExecutionProfile profile;
+ profile.set_consistency(CASS_CONSISTENCY_LOCAL_QUORUM);
+ Config config;
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
+ config.set_execution_profile("profile", &profile);
+ connect(config, &session);
+
+ EXPECT_TRUE(check_consistency(session, CASS_DEFAULT_CONSISTENCY, CASS_CONSISTENCY_LOCAL_QUORUM));
+
+ close(&session);
+}
+
+TEST_F(SessionUnitTest, DbaasDetectionUpdateDefaultConsistency) {
+ mockssandra::SimpleRequestHandlerBuilder builder;
+ builder.on(mockssandra::OPCODE_OPTIONS).execute(new SupportedDbaasOptions());
+ mockssandra::SimpleCluster cluster(builder.build());
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ Session session;
+ {
+ Config session_config = session.config();
+ EXPECT_EQ(CASS_CONSISTENCY_UNKNOWN, session_config.consistency());
+ }
+
+ ExecutionProfile profile;
+ Config config;
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
+ config.set_execution_profile("profile", &profile);
+ connect(config, &session);
+
+ EXPECT_TRUE(
+ check_consistency(session, CASS_DEFAULT_DBAAS_CONSISTENCY, CASS_DEFAULT_DBAAS_CONSISTENCY));
+
+ close(&session);
+}
+
+TEST_F(SessionUnitTest, DbaasDefaultConsistencyExecutionProfileNotUpdate) {
+ mockssandra::SimpleRequestHandlerBuilder builder;
+ builder.on(mockssandra::OPCODE_OPTIONS).execute(new SupportedDbaasOptions());
+ mockssandra::SimpleCluster cluster(builder.build());
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ Session session;
+ {
+ Config session_config = session.config();
+ EXPECT_EQ(CASS_CONSISTENCY_UNKNOWN, session_config.consistency());
+ }
+
+ ExecutionProfile profile;
+ profile.set_consistency(CASS_CONSISTENCY_LOCAL_ONE);
+ Config config;
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
+ config.set_execution_profile("profile", &profile);
+ connect(config, &session);
+
+ EXPECT_TRUE(
+ check_consistency(session, CASS_DEFAULT_DBAAS_CONSISTENCY, CASS_CONSISTENCY_LOCAL_ONE));
+
+ close(&session);
+}
diff --git a/gtests/src/unit/tests/test_session_base.cpp b/gtests/src/unit/tests/test_session_base.cpp
index 031bec368..2a1820b70 100644
--- a/gtests/src/unit/tests/test_session_base.cpp
+++ b/gtests/src/unit/tests/test_session_base.cpp
@@ -44,25 +44,27 @@ class TestSessionBase : public SessionBase {
protected:
virtual void on_connect(const Host::Ptr& connected_host, ProtocolVersion protocol_version,
- const HostMap& hosts, const TokenMap::Ptr& token_map) {
+ const HostMap& hosts, const TokenMap::Ptr& token_map,
+ const String& local_dc) {
++connected_;
- ASSERT_STREQ("127.0.0.1", connected_host->address_string().c_str());
- ASSERT_EQ(ProtocolVersion(PROTOCOL_VERSION), protocol_version);
- ASSERT_EQ(1u, hosts.size());
- ASSERT_EQ(state(), SESSION_STATE_CONNECTING);
+
+ EXPECT_STREQ("127.0.0.1", connected_host->address_string().c_str());
+ EXPECT_EQ(ProtocolVersion(PROTOCOL_VERSION), protocol_version);
+ EXPECT_EQ(1u, hosts.size());
+ EXPECT_EQ(state(), SESSION_STATE_CONNECTING);
notify_connected();
}
virtual void on_connect_failed(CassError code, const String& message) {
++failed_;
- ASSERT_EQ(state(), SESSION_STATE_CONNECTING);
+ EXPECT_EQ(state(), SESSION_STATE_CONNECTING);
notify_connect_failed(code, message);
- ASSERT_EQ(state(), SESSION_STATE_CLOSED);
+ EXPECT_EQ(state(), SESSION_STATE_CLOSED);
}
virtual void on_close() {
++closed_;
- ASSERT_EQ(state(), SESSION_STATE_CLOSING);
+ EXPECT_EQ(state(), SESSION_STATE_CLOSING);
notify_closed();
}
@@ -79,7 +81,7 @@ TEST_F(SessionBaseUnitTest, Simple) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
TestSessionBase session_base;
Future::Ptr connect_future(session_base.connect(config, KEYSPACE));
@@ -103,7 +105,7 @@ TEST_F(SessionBaseUnitTest, SimpleEmptyKeyspaceWithoutRandom) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
config.set_use_randomized_contact_points(false);
TestSessionBase session_base;
@@ -129,7 +131,7 @@ TEST_F(SessionBaseUnitTest, Ssl) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
config.set_ssl_context(settings.socket_settings.ssl_context.get());
TestSessionBase session_base;
@@ -155,8 +157,8 @@ TEST_F(SessionBaseUnitTest, SimpleInvalidContactPointsIp) {
Config config;
config.set_use_randomized_contact_points(false);
- config.contact_points().push_back("123.456.789.012");
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("123.456.789.012", 9042));
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
TestSessionBase session_base;
Future::Ptr connect_future(session_base.connect(config, KEYSPACE));
@@ -179,8 +181,8 @@ TEST_F(SessionBaseUnitTest, SimpleInvalidContactPointsHostname) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("doesnotexist.dne");
- config.contact_points().push_back("localhost");
+ config.contact_points().push_back(Address("doesnotexist.dne", 9042));
+ config.contact_points().push_back(Address("localhost", 9042));
TestSessionBase session_base;
Future::Ptr connect_future(session_base.connect(config, KEYSPACE));
@@ -205,7 +207,7 @@ TEST_F(SessionBaseUnitTest, InvalidProtocol) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
TestSessionBase session_base;
Future::Ptr connect_future(session_base.connect(config, KEYSPACE));
@@ -216,6 +218,25 @@ TEST_F(SessionBaseUnitTest, InvalidProtocol) {
EXPECT_EQ(0, session_base.closed());
}
+TEST_F(SessionBaseUnitTest, UnsupportedProtocol) {
+ mockssandra::SimpleCluster cluster(simple());
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ Config config;
+ config.set_protocol_version(ProtocolVersion(2)); // Unsupported protocol version
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
+ TestSessionBase session_base;
+
+ Future::Ptr connect_future(session_base.connect(config, KEYSPACE));
+ ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME));
+ EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, connect_future->error()->code);
+ EXPECT_TRUE(connect_future->error()->message.find(
+ "Operation unsupported by this protocol version") != String::npos);
+ EXPECT_EQ(0, session_base.connected());
+ EXPECT_EQ(1, session_base.failed());
+ EXPECT_EQ(0, session_base.closed());
+}
+
TEST_F(SessionBaseUnitTest, SslError) {
mockssandra::SimpleCluster cluster(simple());
use_ssl(&cluster);
@@ -224,7 +245,7 @@ TEST_F(SessionBaseUnitTest, SslError) {
SslContext::Ptr invalid_ssl_context(SslContextFactory::create());
invalid_ssl_context->set_verify_flags(CASS_SSL_VERIFY_PEER_CERT);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
config.set_ssl_context(invalid_ssl_context.get());
TestSessionBase session_base;
@@ -241,7 +262,7 @@ TEST_F(SessionBaseUnitTest, Auth) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
config.set_credentials("cassandra", "cassandra");
TestSessionBase session_base;
@@ -263,7 +284,7 @@ TEST_F(SessionBaseUnitTest, BadCredentials) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
TestSessionBase session_base;
Future::Ptr connect_future(session_base.connect(config, KEYSPACE));
@@ -276,7 +297,7 @@ TEST_F(SessionBaseUnitTest, BadCredentials) {
TEST_F(SessionBaseUnitTest, NoHostsAvailable) {
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
TestSessionBase session_base;
Future::Ptr connect_future(session_base.connect(config, KEYSPACE));
@@ -292,7 +313,7 @@ TEST_F(SessionBaseUnitTest, ConnectWhenAlreadyConnected) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
TestSessionBase session_base;
{
@@ -323,7 +344,7 @@ TEST_F(SessionBaseUnitTest, CloseWhenAlreadyClosed) {
ASSERT_EQ(cluster.start_all(), 0);
Config config;
- config.contact_points().push_back("127.0.0.1");
+ config.contact_points().push_back(Address("127.0.0.1", 9042));
TestSessionBase session_base;
Future::Ptr connect_future(session_base.connect(config, KEYSPACE));
diff --git a/gtests/src/unit/tests/test_socket.cpp b/gtests/src/unit/tests/test_socket.cpp
index 26f77d31a..176bdbd91 100644
--- a/gtests/src/unit/tests/test_socket.cpp
+++ b/gtests/src/unit/tests/test_socket.cpp
@@ -20,9 +20,56 @@
#include "socket_connector.hpp"
#include "ssl.hpp"
-#define SSL_VERIFY_PEER_DNS_RELATIVE_HOSTNAME "cpp-driver.hostname"
-#define SSL_VERIFY_PEER_DNS_ABSOLUTE_HOSTNAME SSL_VERIFY_PEER_DNS_RELATIVE_HOSTNAME "."
-#define SSL_VERIFY_PEER_DNS_IP_ADDRESS "127.254.254.254"
+#define DNS_HOSTNAME "cpp-driver.hostname."
+#define DNS_IP_ADDRESS "127.254.254.254"
+
+using mockssandra::internal::ClientConnection;
+using mockssandra::internal::ClientConnectionFactory;
+using mockssandra::internal::ServerConnection;
+
+class CloseConnection : public ClientConnection {
+public:
+ CloseConnection(ServerConnection* server)
+ : ClientConnection(server) {}
+
+ virtual int on_accept() {
+ int rc = accept();
+ if (rc != 0) {
+ return rc;
+ }
+ close();
+ return rc;
+ }
+};
+
+class CloseConnectionFactory : public ClientConnectionFactory {
+public:
+ virtual ClientConnection* create(ServerConnection* server) const {
+ return new CloseConnection(server);
+ }
+};
+
+class SniServerNameConnection : public ClientConnection {
+public:
+ SniServerNameConnection(ServerConnection* server)
+ : ClientConnection(server) {}
+
+ virtual void on_read(const char* data, size_t len) {
+ const char* server_name = sni_server_name();
+ if (server_name) {
+ write(String(server_name) + " - Closed");
+ } else {
+ write(" - Closed");
+ }
+ }
+};
+
+class SniServerNameConnectionFactory : public ClientConnectionFactory {
+public:
+ virtual ClientConnection* create(ServerConnection* server) const {
+ return new SniServerNameConnection(server);
+ }
+};
using namespace datastax;
using namespace datastax::internal;
@@ -88,19 +135,27 @@ class SocketUnitTest : public LoopTest {
return settings;
}
- void listen() { ASSERT_EQ(server_.listen(), 0); }
-
- void reset(const Address& address) { server_.reset(address); }
+ void listen(const Address& address = Address("127.0.0.1", 8888)) {
+ ASSERT_EQ(server_.listen(address), 0);
+ }
void close() { server_.close(); }
- void use_close_immediately() { server_.use_close_immediately(); }
+ void use_close_immediately() { server_.use_connection_factory(new CloseConnectionFactory()); }
+ void use_sni_server_name() {
+ server_.use_connection_factory(new SniServerNameConnectionFactory());
+ }
virtual void TearDown() {
LoopTest::TearDown();
close();
}
+ bool verify_dns() {
+ verify_dns_check(); // Verify address can be resolved
+ return !HasFailure();
+ }
+
static void on_socket_connected(SocketConnector* connector, String* result) {
Socket::Ptr socket = connector->release_socket();
if (connector->error_code() == SocketConnector::SOCKET_OK) {
@@ -136,16 +191,32 @@ class SocketUnitTest : public LoopTest {
}
}
- static void on_request(uv_getnameinfo_t* handle, int status, const char* hostname,
- const char* service) {
+ static void on_request(uv_getaddrinfo_t* handle, int status, struct addrinfo* res) {
if (status) {
- FAIL() << "Unable to Execute Test SocketUnitTest.SslVerifyIdentityDns: "
- << "Add /etc/hosts entry " << SSL_VERIFY_PEER_DNS_IP_ADDRESS << "\t"
- << SSL_VERIFY_PEER_DNS_ABSOLUTE_HOSTNAME;
- } else if (String(hostname) != String(SSL_VERIFY_PEER_DNS_ABSOLUTE_HOSTNAME)) {
- FAIL() << "Invalid /etc/hosts entry for: '" << hostname << "' != '"
- << SSL_VERIFY_PEER_DNS_ABSOLUTE_HOSTNAME << "'";
+ FAIL() << "Unable to Execute Test: "
+ << "Add /etc/hosts entry " << DNS_IP_ADDRESS << "\t" << DNS_HOSTNAME;
+ } else {
+ bool match = false;
+ do {
+ Address address(res->ai_addr);
+ if (address.is_valid_and_resolved() && address == Address(DNS_IP_ADDRESS, 8888)) {
+ match = true;
+ break;
+ }
+ res = res->ai_next;
+ } while (res);
+ ASSERT_TRUE(match) << "Invalid /etc/hosts entry for: '" << DNS_HOSTNAME << "' != '"
+ << DNS_IP_ADDRESS << "'";
}
+ uv_freeaddrinfo(res);
+ }
+
+private:
+ void verify_dns_check() {
+ uv_getaddrinfo_t request;
+ Address::SocketStorage storage;
+ ASSERT_EQ(0, uv_getaddrinfo(loop(), &request, on_request, DNS_HOSTNAME, "8888", NULL));
+ uv_run(loop(), UV_RUN_DEFAULT);
}
private:
@@ -166,11 +237,27 @@ TEST_F(SocketUnitTest, Simple) {
EXPECT_EQ(result, "The socket is successfully connected and wrote data - Closed");
}
-TEST_F(SocketUnitTest, Ssl) {
- listen();
+TEST_F(SocketUnitTest, SimpleDns) {
+ if (!verify_dns()) return;
+
+ listen(Address(DNS_IP_ADDRESS, 8888));
+
+ String result;
+ SocketConnector::Ptr connector(new SocketConnector(Address(DNS_HOSTNAME, 8888),
+ bind_callback(on_socket_connected, &result)));
+ connector->connect(loop());
+
+ uv_run(loop(), UV_RUN_DEFAULT);
+
+ EXPECT_EQ(result, "The socket is successfully connected and wrote data - Closed");
+}
+
+TEST_F(SocketUnitTest, Ssl) {
SocketSettings settings(use_ssl());
+ listen();
+
String result;
SocketConnector::Ptr connector(
new SocketConnector(Address("127.0.0.1", 8888), bind_callback(on_socket_connected, &result)));
@@ -182,6 +269,24 @@ TEST_F(SocketUnitTest, Ssl) {
EXPECT_EQ(result, "The socket is successfully connected and wrote data - Closed");
}
+TEST_F(SocketUnitTest, SslSniServerName) {
+ SocketSettings settings(use_ssl());
+
+ use_sni_server_name();
+ listen();
+
+ String result;
+ SocketConnector::Ptr connector(
+ new SocketConnector(Address("127.0.0.1", 8888, "TestSniServerName"),
+ bind_callback(on_socket_connected, &result)));
+
+ connector->with_settings(settings)->connect(loop());
+
+ uv_run(loop(), UV_RUN_DEFAULT);
+
+ EXPECT_EQ(result, "TestSniServerName - Closed");
+}
+
TEST_F(SocketUnitTest, Refused) {
bool is_refused = false;
SocketConnector::Ptr connector(new SocketConnector(
@@ -194,11 +299,11 @@ TEST_F(SocketUnitTest, Refused) {
}
TEST_F(SocketUnitTest, SslClose) {
+ SocketSettings settings(use_ssl());
+
use_close_immediately();
listen();
- SocketSettings settings(use_ssl());
-
Vector<SocketConnector::Ptr> connectors;
bool is_closed = false;
@@ -241,10 +346,10 @@ TEST_F(SocketUnitTest, Cancel) {
}
TEST_F(SocketUnitTest, SslCancel) {
- listen();
-
SocketSettings settings(use_ssl());
+ listen();
+
Vector<SocketConnector::Ptr> connectors;
bool is_canceled = false;
@@ -268,9 +373,10 @@ TEST_F(SocketUnitTest, SslCancel) {
}
TEST_F(SocketUnitTest, SslVerifyIdentity) {
+ SocketSettings settings(use_ssl("127.0.0.1"));
+
listen();
- SocketSettings settings(use_ssl("127.0.0.1"));
settings.ssl_context->set_verify_flags(CASS_SSL_VERIFY_PEER_IDENTITY);
String result;
@@ -285,26 +391,17 @@ TEST_F(SocketUnitTest, SslVerifyIdentity) {
}
TEST_F(SocketUnitTest, SslVerifyIdentityDns) {
- // Verify address can be resolved
- Address verify_entry;
- Address::from_string(SSL_VERIFY_PEER_DNS_IP_ADDRESS, 8888, &verify_entry);
- uv_getnameinfo_t request;
- ASSERT_EQ(0, uv_getnameinfo(loop(), &request, on_request,
- static_cast(verify_entry).addr(), 0));
- uv_run(loop(), UV_RUN_DEFAULT);
- if (this->HasFailure()) { // Make test fail due to DNS not configured
- return;
- }
+ if (!verify_dns()) return;
- reset(Address(SSL_VERIFY_PEER_DNS_IP_ADDRESS,
- 8888)); // Ensure the echo server is listening on the correct address
- listen();
+ SocketSettings settings(use_ssl(DNS_HOSTNAME));
+
+ listen(Address(DNS_IP_ADDRESS, 8888));
- SocketSettings settings(use_ssl(SSL_VERIFY_PEER_DNS_RELATIVE_HOSTNAME));
settings.ssl_context->set_verify_flags(CASS_SSL_VERIFY_PEER_IDENTITY_DNS);
+ settings.resolve_timeout_ms = 12000;
String result;
- SocketConnector::Ptr connector(new SocketConnector(Address(SSL_VERIFY_PEER_DNS_IP_ADDRESS, 8888),
+ SocketConnector::Ptr connector(new SocketConnector(Address(DNS_HOSTNAME, 8888),
bind_callback(on_socket_connected, &result)));
connector->with_settings(settings)->connect(loop());
diff --git a/gtests/src/unit/tests/test_startup_options.cpp b/gtests/src/unit/tests/test_startup_options.cpp
index 5288bdc2a..cffc77178 100644
--- a/gtests/src/unit/tests/test_startup_options.cpp
+++ b/gtests/src/unit/tests/test_startup_options.cpp
@@ -56,7 +56,7 @@ class StartupRequestUnitTest : public Unit {
}
void connect() {
- config_.contact_points().push_back("127.0.0.1");
+ config_.contact_points().push_back(Address("127.0.0.1", 9042));
internal::core::Future::Ptr connect_future(session_.connect(config_));
ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME))
<< "Timed out waiting for session to connect";
diff --git a/gtests/src/unit/tests/test_statement.cpp b/gtests/src/unit/tests/test_statement.cpp
index 7abd83a9c..891aac959 100644
--- a/gtests/src/unit/tests/test_statement.cpp
+++ b/gtests/src/unit/tests/test_statement.cpp
@@ -16,6 +16,7 @@
#include "unit.hpp"
+#include "batch_request.hpp"
#include "constants.hpp"
#include "control_connection.hpp"
#include "query_request.hpp"
@@ -32,8 +33,9 @@ class StatementUnitTest : public Unit {
void connect(const Config& config = Config()) {
Config temp(config);
- temp.contact_points().push_back("127.0.0.1");
- temp.contact_points().push_back("127.0.0.2"); // At least one more host (in case node 1 is down)
+ temp.contact_points().push_back(Address("127.0.0.1", 9042));
+ temp.contact_points().push_back(
+ Address("127.0.0.2", 9042)); // At least one more host (in case node 1 is down)
Future::Ptr connect_future(session.connect(temp));
ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME))
<< "Timed out waiting for session to connect";
@@ -54,7 +56,8 @@ class StatementUnitTest : public Unit {
CassInet inet;
ASSERT_TRUE(value->decoder().as_inet(value->size(), &inet));
- ASSERT_TRUE(Address::from_inet(inet.address, inet.address_length, 9042, output));
+ *output = Address(inet.address, inet.address_length, 9042);
+ ASSERT_TRUE(output->is_valid_and_resolved());
}
Session session;
@@ -121,3 +124,45 @@ TEST_F(StatementUnitTest, SetHostWhereHostIsDown) {
ASSERT_TRUE(future->error());
EXPECT_EQ(future->error()->code, CASS_ERROR_LIB_NO_HOSTS_AVAILABLE);
}
+
+TEST_F(StatementUnitTest, ErrorBatchWithNamedParameters) {
+ mockssandra::SimpleCluster cluster(simple(), 1);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ connect();
+
+ BatchRequest::Ptr batch(new BatchRequest(CASS_BATCH_TYPE_UNLOGGED));
+
+ Statement::Ptr request(new QueryRequest("SELECT * FROM does_not_matter WHERE key = ?",
+ 1)); // Space for a named parameter
+
+ request->set("key", 42); // Use named parameters
+
+ batch->add_statement(request.get());
+
+ ResponseFuture::Ptr future(session.execute(Request::ConstPtr(batch)));
+ future->wait();
+
+ ASSERT_TRUE(future->error());
+ EXPECT_EQ(future->error()->code, CASS_ERROR_LIB_BAD_PARAMS);
+ EXPECT_EQ(future->error()->message, "Batches cannot contain queries with named values");
+}
+
+TEST_F(StatementUnitTest, ErrorParametersUnset) {
+ mockssandra::SimpleCluster cluster(simple(), 1);
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ Config config;
+ config.set_protocol_version(ProtocolVersion(3));
+
+ connect(config);
+
+ Statement::Ptr request(new QueryRequest("SELECT * FROM does_not_matter WHERE key = ?",
+ 1)); // Parameters start as unset
+
+ ResponseFuture::Ptr future(session.execute(Request::ConstPtr(request)));
+ future->wait();
+
+ ASSERT_TRUE(future->error());
+ EXPECT_EQ(future->error()->code, CASS_ERROR_LIB_PARAMETER_UNSET);
+}
diff --git a/gtests/src/unit/tests/test_supported_response.cpp b/gtests/src/unit/tests/test_supported_response.cpp
new file mode 100644
index 000000000..16911e498
--- /dev/null
+++ b/gtests/src/unit/tests/test_supported_response.cpp
@@ -0,0 +1,131 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ This software can be used solely with DataStax Enterprise. Please consult the
+ license at http://www.datastax.com/terms/datastax-dse-driver-license-terms
+*/
+
+#include "loop_test.hpp"
+
+#include "options_request.hpp"
+#include "request_callback.hpp"
+#include "supported_response.hpp"
+
+using namespace datastax;
+using namespace datastax::internal;
+using namespace datastax::internal::core;
+
+class SupportedResponseUnitTest : public LoopTest {
+public:
+ const mockssandra::RequestHandler* simple_cluster_with_options() {
+ mockssandra::SimpleRequestHandlerBuilder builder;
+ builder.on(mockssandra::OPCODE_OPTIONS).execute(new SupportedOptions());
+ return builder.build();
+ }
+
+public:
+ static void on_connect(Connector* connector, StringMultimap* supported_options) {
+ ASSERT_TRUE(connector->is_ok());
+ *supported_options = connector->supported_options();
+ }
+
+private:
+ class SupportedOptions : public mockssandra::Action {
+ public:
+ virtual void on_run(mockssandra::Request* request) const {
+ Vector<String> compression;
+ Vector<String> cql_version;
+ Vector<String> protocol_versions;
+ compression.push_back("snappy");
+ compression.push_back("lz4");
+ cql_version.push_back("3.4.5");
+ protocol_versions.push_back("3/v3");
+ protocol_versions.push_back("4/v4");
+
+ StringMultimap supported;
+ supported["COMPRESSION"] = compression;
+ supported["CQL_VERSION"] = cql_version;
+ supported["PROTOCOL_VERSIONS"] = protocol_versions;
+
+ String body;
+ mockssandra::encode_string_map(supported, &body);
+ request->write(mockssandra::OPCODE_SUPPORTED, body);
+ }
+ };
+};
+
+TEST_F(SupportedResponseUnitTest, Simple) {
+ mockssandra::SimpleCluster cluster(simple_cluster_with_options());
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ StringMultimap supported_options;
+ ASSERT_EQ(0, supported_options.size());
+ Connector::Ptr connector(new Connector(Host::Ptr(new Host(Address("127.0.0.1", PORT))),
+ PROTOCOL_VERSION,
+ bind_callback(on_connect, &supported_options)));
+ connector->connect(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+
+ ASSERT_EQ(3u, supported_options.size());
+ {
+ Vector<String> compression = supported_options.find("COMPRESSION")->second;
+ ASSERT_EQ(2u, compression.size());
+ EXPECT_EQ("snappy", compression[0]);
+ EXPECT_EQ("lz4", compression[1]);
+ }
+ {
+ Vector<String> cql_version = supported_options.find("CQL_VERSION")->second;
+ ASSERT_EQ(1u, cql_version.size());
+ EXPECT_EQ("3.4.5", cql_version[0]);
+ }
+ {
+ Vector<String> protocol_versions = supported_options.find("PROTOCOL_VERSIONS")->second;
+ ASSERT_EQ(2u, protocol_versions.size());
+ EXPECT_EQ("3/v3", protocol_versions[0]);
+ EXPECT_EQ("4/v4", protocol_versions[1]);
+ }
+
+ { // Non-existent key
+ EXPECT_EQ(supported_options.end(), supported_options.find("invalid"));
+ }
+}
+
+TEST_F(SupportedResponseUnitTest, UppercaseKeysOnly) {
+ class CaseInsensitiveSupportedOptions : public mockssandra::Action {
+ public:
+ virtual void on_run(mockssandra::Request* request) const {
+ Vector<String> camel_key;
+ camel_key.push_back("success");
+
+ StringMultimap supported;
+ supported["CamEL_KeY"] = camel_key;
+
+ String body;
+ mockssandra::encode_string_map(supported, &body);
+ request->write(mockssandra::OPCODE_SUPPORTED, body);
+ }
+ };
+
+ mockssandra::SimpleRequestHandlerBuilder builder;
+ builder.on(mockssandra::OPCODE_OPTIONS).execute(new CaseInsensitiveSupportedOptions());
+ mockssandra::SimpleCluster cluster(builder.build());
+ ASSERT_EQ(cluster.start_all(), 0);
+
+ StringMultimap supported_options;
+ ASSERT_EQ(0, supported_options.size());
+ Connector::Ptr connector(new Connector(Host::Ptr(new Host(Address("127.0.0.1", PORT))),
+ PROTOCOL_VERSION,
+ bind_callback(on_connect, &supported_options)));
+ connector->connect(loop());
+ uv_run(loop(), UV_RUN_DEFAULT);
+
+ ASSERT_EQ(1u, supported_options.size());
+ { // Uppercase
+ Vector<String> uppercase = supported_options.find("CAMEL_KEY")->second;
+ ASSERT_EQ(1u, uppercase.size());
+ EXPECT_EQ("success", uppercase[0]);
+ }
+ { // Exact key
+ EXPECT_EQ(supported_options.end(), supported_options.find("CamEL_KeY"));
+ }
+}
diff --git a/gtests/src/unit/tests/test_tracing.cpp b/gtests/src/unit/tests/test_tracing.cpp
index 1943e0eed..5b4dc9cc4 100644
--- a/gtests/src/unit/tests/test_tracing.cpp
+++ b/gtests/src/unit/tests/test_tracing.cpp
@@ -31,7 +31,7 @@ class TracingUnitTest : public Unit {
void connect(const Config& config = Config()) {
Config temp(config);
- temp.contact_points().push_back("127.0.0.1");
+ temp.contact_points().push_back(Address("127.0.0.1", 9042));
Future::Ptr connect_future(session.connect(temp));
ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME))
<< "Timed out waiting for session to connect";
diff --git a/gtests/src/unit/tests/test_value.cpp b/gtests/src/unit/tests/test_value.cpp
index cce7a06ce..949645f71 100644
--- a/gtests/src/unit/tests/test_value.cpp
+++ b/gtests/src/unit/tests/test_value.cpp
@@ -16,12 +16,15 @@
#include
+#include "buffer.hpp"
#include "cassandra.h"
+#include "string.hpp"
#include "value.hpp"
#include
using namespace datastax::internal::core;
+using namespace datastax;
// The following CassValue's are used in tests as "bad data".
@@ -107,3 +110,81 @@ TEST(ValueUnitTest, BadDecimal) {
EXPECT_EQ(cass_value_get_decimal(CassValue::to(&invalid_value), &varint, &varint_size, &scale),
CASS_ERROR_LIB_NOT_ENOUGH_DATA);
}
+
+TEST(ValueUnitTest, NullElementInCollectionList) {
+ const char input[12] = {
+ -1, -1, -1, -1, // Element 1 is NULL
+ 0, 0, 0, 4, 0, 0, 0, 2 // Size (int32_t) and contents of element 2
+ };
+ Decoder decoder(input, 12);
+ DataType::ConstPtr element_data_type(new DataType(CASS_VALUE_TYPE_INT));
+ CollectionType::ConstPtr data_type = CollectionType::list(element_data_type, false);
+ Value value(data_type, 2, decoder);
+ ASSERT_EQ(cass_true, cass_value_is_collection(CassValue::to(&value)));
+
+ CassIterator* it = cass_iterator_from_collection(CassValue::to(&value));
+ EXPECT_EQ(cass_true, cass_iterator_next(it));
+ const CassValue* element = cass_iterator_get_value(it);
+ EXPECT_EQ(cass_true, cass_value_is_null(element));
+ cass_int32_t element_value;
+ EXPECT_EQ(cass_true, cass_iterator_next(it));
+ EXPECT_EQ(CASS_OK, cass_value_get_int32(element, &element_value));
+ EXPECT_EQ(2, element_value);
+ cass_iterator_free(it);
+}
+
+TEST(ValueUnitTest, NullElementInCollectionMap) {
+ const char input[21] = {
+ -1, -1, -1, -1, // Key 1 is NULL
+ 0, 0, 0, 4, 0, 0, 0, 2, // Size (int32_t) and contents of value 1
+ 0, 0, 0, 1, 'a', // Key 2 is a
+ -1, -1, -1, -1 // Value 2 is NULL
+ };
+ Decoder decoder(input, 21);
+ DataType::ConstPtr key_data_type(new DataType(CASS_VALUE_TYPE_TEXT));
+ DataType::ConstPtr value_data_type(new DataType(CASS_VALUE_TYPE_INT));
+ CollectionType::ConstPtr data_type = CollectionType::map(key_data_type, value_data_type, false);
+ Value value(data_type, 2, decoder);
+ ASSERT_EQ(cass_true, cass_value_is_collection(CassValue::to(&value)));
+
+ CassIterator* it = cass_iterator_from_collection(CassValue::to(&value));
+ EXPECT_EQ(cass_true, cass_iterator_next(it));
+ const CassValue* element = cass_iterator_get_value(it);
+ EXPECT_EQ(cass_true, cass_value_is_null(element));
+ cass_int32_t value_value;
+ EXPECT_EQ(cass_true, cass_iterator_next(it));
+ EXPECT_EQ(CASS_OK, cass_value_get_int32(element, &value_value));
+ EXPECT_EQ(2, value_value);
+
+ EXPECT_EQ(cass_true, cass_iterator_next(it));
+ element = cass_iterator_get_value(it);
+ const char* key_value = NULL;
+ size_t key_value_length = 0;
+ EXPECT_EQ(CASS_OK, cass_value_get_string(element, &key_value, &key_value_length));
+ EXPECT_EQ("a", String(key_value, key_value_length));
+ EXPECT_EQ(cass_true, cass_iterator_next(it));
+ EXPECT_EQ(cass_true, cass_value_is_null(element));
+ cass_iterator_free(it);
+}
+
+TEST(ValueUnitTest, NullElementInCollectionSet) {
+ const char input[12] = {
+ 0, 0, 0, 4, 0, 0, 0, 2, // Size (int32_t) and contents of element 1
+ -1, -1, -1, -1, // Element 2 is NULL
+ };
+ Decoder decoder(input, 12);
+ DataType::ConstPtr element_data_type(new DataType(CASS_VALUE_TYPE_INT));
+ CollectionType::ConstPtr data_type = CollectionType::set(element_data_type, false);
+ Value value(data_type, 2, decoder);
+ ASSERT_EQ(cass_true, cass_value_is_collection(CassValue::to(&value)));
+
+ CassIterator* it = cass_iterator_from_collection(CassValue::to(&value));
+ EXPECT_EQ(cass_true, cass_iterator_next(it));
+ const CassValue* element = cass_iterator_get_value(it);
+ cass_int32_t element_value;
+ EXPECT_EQ(CASS_OK, cass_value_get_int32(element, &element_value));
+ EXPECT_EQ(2, element_value);
+ EXPECT_EQ(cass_true, cass_iterator_next(it));
+ EXPECT_EQ(cass_true, cass_value_is_null(element));
+ cass_iterator_free(it);
+}
diff --git a/include/cassandra.h b/include/cassandra.h
index b2d1770b6..a22da5e3e 100644
--- a/include/cassandra.h
+++ b/include/cassandra.h
@@ -52,7 +52,7 @@
*/
#define CASS_VERSION_MAJOR 2
-#define CASS_VERSION_MINOR 13
+#define CASS_VERSION_MINOR 14
#define CASS_VERSION_PATCH 0
#define CASS_VERSION_SUFFIX ""
@@ -700,7 +700,8 @@ typedef enum CassErrorSource_ {
XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_NO_PEER_CERT, 3, "No peer certificate") \
XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_INVALID_PEER_CERT, 4, "Invalid peer certificate") \
XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_IDENTITY_MISMATCH, 5, "Certificate does not match host or IP address") \
- XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_PROTOCOL_ERROR, 6, "Protocol error")
+ XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_PROTOCOL_ERROR, 6, "Protocol error") \
+ XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_CLOSED, 7, "Connection closed")
/* @cond IGNORE */
#define CASS_ERROR_MAP CASS_ERROR_MAPPING /* Deprecated */
@@ -2759,6 +2760,71 @@ cass_cluster_set_host_listener_callback(CassCluster* cluster,
CassHostListenerCallback callback,
void* data);
+/**
+ * Sets the secure connection bundle path for processing DBaaS credentials.
+ *
+ * This will pre-configure a cluster using the credentials format provided by
+ * the DBaaS cloud provider.
+ *
+ * @param[in] cluster
+ * @param[in] path Absolute path to DBaaS credentials file.
+ * @return CASS_OK if successful, otherwise error occurred.
+ */
+CASS_EXPORT CassError
+cass_cluster_set_cloud_secure_connection_bundle(CassCluster* cluster,
+ const char* path);
+
+/**
+ * Same as cass_cluster_set_cloud_secure_connection_bundle(), but with lengths
+ * for string parameters.
+ *
+ * @see cass_cluster_set_cloud_secure_connection_bundle()
+ *
+ * @param[in] cluster
+ * @param[in] path Absolute path to DBaaS credentials file.
+ * @param[in] path_length Length of path variable.
+ * @return CASS_OK if successful, otherwise error occurred.
+ */
+CASS_EXPORT CassError
+cass_cluster_set_cloud_secure_connection_bundle_n(CassCluster* cluster,
+ const char* path,
+ size_t path_length);
+
+/**
+ * Same as cass_cluster_set_cloud_secure_connection_bundle(), but it does not
+ * initialize the underlying SSL library implementation. The SSL library still
+ * needs to be initialized, but it's up to the client application to handle
+ * initialization. This is similar to the function cass_ssl_new_no_lib_init(),
+ * and its documentation should be used as a reference to properly initialize
+ * the underlying SSL library.
+ *
+ * @see cass_ssl_new_no_lib_init()
+ * @see cass_cluster_set_cloud_secure_connection_bundle()
+ *
+ * @param[in] cluster
+ * @param[in] path Absolute path to DBaaS credentials file.
+ * @return CASS_OK if successful, otherwise error occurred.
+ */
+CASS_EXPORT CassError
+cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(CassCluster* cluster,
+ const char* path);
+
+/**
+ * Same as cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(),
+ * but with lengths for string parameters.
+ *
+ * @see cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init()
+ *
+ * @param[in] cluster
+ * @param[in] path Absolute path to DBaaS credentials file.
+ * @param[in] path_length Length of path variable.
+ * @return CASS_OK if successful, otherwise error occurred.
+ */
+CASS_EXPORT CassError
+cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init_n(CassCluster* cluster,
+ const char* path,
+ size_t path_length);
+
/***********************************************************************************
*
* Session
diff --git a/packaging/debian/rules b/packaging/debian/rules
index 62277dfe4..f4bcd60db 100755
--- a/packaging/debian/rules
+++ b/packaging/debian/rules
@@ -12,7 +12,8 @@ ifneq (,$(filter parallel=%,$(DEB_BUILD_OPTIONS)))
endif
export SOVER ?= $(shell dpkg-parsechangelog \
- | sed -rne 's/^Version: ([0-9.]+)[-+~].*$$/\1/p')
+ | sed -rne 's/^Version: ([0-9.]+)([-+~][[:alpha:]][[:alnum:]]*)?([-+~][[:digit:]])?$$/\1\2/p' \
+ | sed 's/[+~]/-/')
export SONAME=libcassandra.so.$(SOVER)
%:
diff --git a/src/address.cpp b/src/address.cpp
index 5d36a2954..752bd9ae3 100644
--- a/src/address.cpp
+++ b/src/address.cpp
@@ -21,187 +21,161 @@
#include "row.hpp"
#include "value.hpp"
-#include
-#include
-
using namespace datastax;
using namespace datastax::internal::core;
-const Address Address::EMPTY_KEY("0.0.0.0", 0);
-const Address Address::DELETED_KEY("0.0.0.0", 1);
-
-const Address Address::BIND_ANY_IPV4("0.0.0.0", 0);
-const Address Address::BIND_ANY_IPV6("::", 0);
+const Address Address::EMPTY_KEY(String(), 0);
+const Address Address::DELETED_KEY(String(), 1);
-Address::Address() { memset(&addr_, 0, sizeof(addr_)); }
+namespace {
-Address::Address(const String& ip, int port) {
- init();
- bool result = from_string(ip, port, this);
- UNUSED_(result);
- assert(result);
+template <class T>
+inline void hash_combine(std::size_t& seed, const T& v) {
+ SPARSEHASH_HASH<T> hasher;
+ seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
-bool Address::from_string(const String& ip, int port, Address* output) {
- char buf[sizeof(struct in6_addr)];
- if (uv_inet_pton(AF_INET, ip.c_str(), &buf) == 0) {
- if (output != NULL) {
- struct sockaddr_in addr;
- uv_ip4_addr(ip.c_str(), port, &addr);
- output->init(&addr);
- }
- return true;
- } else if (uv_inet_pton(AF_INET6, ip.c_str(), &buf) == 0) {
- if (output != NULL) {
- struct sockaddr_in6 addr;
- uv_ip6_addr(ip.c_str(), port, &addr);
- output->init(&addr);
- }
- return true;
+} // namespace
+
+Address::Address()
+ : family_(UNRESOLVED)
+ , port_(0) {}
+
+Address::Address(const Address& other, const String& server_name)
+ : hostname_or_address_(other.hostname_or_address_)
+ , server_name_(server_name)
+ , family_(other.family_)
+ , port_(other.port_) {}
+
+Address::Address(const String& hostname, int port, const String& server_name)
+ : server_name_(server_name)
+ , family_(UNRESOLVED)
+ , port_(port) {
+ char addr[16];
+ if (uv_inet_pton(AF_INET, hostname.c_str(), addr) == 0) {
+ hostname_or_address_.assign(addr, addr + 4);
+ family_ = IPv4;
+ } else if (uv_inet_pton(AF_INET6, hostname.c_str(), addr) == 0) {
+ hostname_or_address_.assign(addr, addr + 16);
+ family_ = IPv6;
} else {
- return false;
+ hostname_or_address_ = hostname;
}
}
-bool Address::from_inet(const void* data, size_t size, int port, Address* output) {
-
- if (size == 4) {
- char buf[INET_ADDRSTRLEN];
- if (uv_inet_ntop(AF_INET, data, buf, sizeof(buf)) != 0) {
- return false;
- }
- if (output != NULL) {
- struct sockaddr_in addr;
- uv_ip4_addr(buf, port, &addr);
- output->init(&addr);
- }
-
- return true;
- } else if (size == 16) {
- char buf[INET6_ADDRSTRLEN];
- if (uv_inet_ntop(AF_INET6, data, buf, sizeof(buf)) != 0) {
- return false;
- }
- if (output != NULL) {
- struct sockaddr_in6 addr;
- uv_ip6_addr(buf, port, &addr);
- output->init(&addr);
- }
-
- return true;
+Address::Address(const uint8_t* address, uint8_t address_length, int port)
+ : family_(UNRESOLVED)
+ , port_(port) {
+ if (address_length == 4) {
+ hostname_or_address_.assign(reinterpret_cast<const char*>(address), address_length);
+ family_ = IPv4;
+ } else if (address_length == 16) {
+ hostname_or_address_.assign(reinterpret_cast<const char*>(address), address_length);
+ family_ = IPv6;
}
- return false;
}
-bool Address::init(const sockaddr* addr) {
+Address::Address(const struct sockaddr* addr)
+ : family_(UNRESOLVED)
+ , port_(0) {
if (addr->sa_family == AF_INET) {
- memcpy(addr_in(), addr, sizeof(struct sockaddr_in));
- return true;
+ const struct sockaddr_in* addr_in = reinterpret_cast<const struct sockaddr_in*>(addr);
+ hostname_or_address_.assign(reinterpret_cast<const char*>(&addr_in->sin_addr), 4);
+ port_ = ntohs(addr_in->sin_port);
+ family_ = IPv4;
} else if (addr->sa_family == AF_INET6) {
- memcpy(addr_in6(), addr, sizeof(struct sockaddr_in6));
- return true;
+ const struct sockaddr_in6* addr_in6 = reinterpret_cast<const struct sockaddr_in6*>(addr);
+ hostname_or_address_.assign(reinterpret_cast<const char*>(&addr_in6->sin6_addr), 16);
+ port_ = ntohs(addr_in6->sin6_port);
+ family_ = IPv6;
}
- return false;
}
-void Address::init(const struct sockaddr_in* addr) { *addr_in() = *addr; }
+bool Address::equals(const Address& other, bool with_port) const {
+ if (family_ != other.family_) return false;
+ if (with_port && port_ != other.port_) return false;
+ if (server_name_ != other.server_name_) return false;
+ if (hostname_or_address_ != other.hostname_or_address_) return false;
+ return true;
+}
-void Address::init(const struct sockaddr_in6* addr) { *addr_in6() = *addr; }
+bool Address::operator<(const Address& other) const {
+ if (family_ != other.family_) return family_ < other.family_;
+ if (port_ != other.port_) return port_ < other.port_;
+ if (server_name_ != other.server_name_) return server_name_ < other.server_name_;
+ return hostname_or_address_ < other.hostname_or_address_;
+}
-int Address::port() const {
- if (family() == AF_INET) {
- return htons(addr_in()->sin_port);
- } else if (family() == AF_INET6) {
- return htons(addr_in6()->sin6_port);
+String Address::hostname_or_address() const {
+ if (family_ == IPv4) {
+ char name[INET_ADDRSTRLEN + 1] = { '\0' };
+ uv_inet_ntop(AF_INET, hostname_or_address_.data(), name, INET_ADDRSTRLEN);
+ return name;
+ } else if (family_ == IPv6) {
+ char name[INET6_ADDRSTRLEN + 1] = { '\0' };
+ uv_inet_ntop(AF_INET6, hostname_or_address_.data(), name, INET6_ADDRSTRLEN);
+ return name;
+ } else {
+ return hostname_or_address_;
}
- return -1;
}
-String Address::to_string(bool with_port) const {
- OStringStream ss;
- char host[INET6_ADDRSTRLEN + 1] = { '\0' };
- if (family() == AF_INET) {
- uv_ip4_name(const_cast(addr_in()), host, INET_ADDRSTRLEN);
- ss << host;
- if (with_port) ss << ":" << port();
- } else if (family() == AF_INET6) {
- uv_ip6_name(const_cast(addr_in6()), host, INET6_ADDRSTRLEN);
- if (with_port) ss << "[";
- ss << host;
- if (with_port) ss << "]:" << port();
- }
- return ss.str();
+size_t Address::hash_code() const {
+ SPARSEHASH_HASH<int> hasher;
+ size_t code = hasher(family_);
+ hash_combine(code, port_);
+ hash_combine(code, server_name_);
+ hash_combine(code, hostname_or_address_);
+ return code;
}
-uint8_t Address::to_inet(uint8_t* data) const {
- if (family() == AF_INET) {
- memcpy(data, &addr_in()->sin_addr, 4);
- return 4;
- } else if (family() == AF_INET6) {
- memcpy(data, &addr_in6()->sin6_addr, 16);
- return 16;
+uint8_t Address::to_inet(void* address) const {
+ if (family_ == IPv4 || family_ == IPv6) {
+ size_t size = hostname_or_address_.size();
+ assert((size == 4 || size == 16) && "Invalid size for address");
+ hostname_or_address_.copy(reinterpret_cast<char*>(address), size);
+ return static_cast(size);
}
return 0;
}
-int Address::compare(const Address& a, bool with_port) const {
- if (family() != a.family()) {
- return family() < a.family() ? -1 : 1;
- }
- if (with_port && port() != a.port()) {
- return port() < a.port() ? -1 : 1;
- }
- if (family() == AF_INET) {
- if (addr_in()->sin_addr.s_addr != a.addr_in()->sin_addr.s_addr) {
- return addr_in()->sin_addr.s_addr < a.addr_in()->sin_addr.s_addr ? -1 : 1;
- }
- } else if (family() == AF_INET6) {
- return memcmp(&(addr_in6()->sin6_addr), &(a.addr_in6()->sin6_addr),
- sizeof(addr_in6()->sin6_addr));
+const struct sockaddr* Address::to_sockaddr(SocketStorage* storage) const {
+ int rc = 0;
+ if (family_ == IPv4) {
+ char name[INET_ADDRSTRLEN + 1] = { '\0' };
+ rc = uv_inet_ntop(AF_INET, hostname_or_address_.data(), name, INET_ADDRSTRLEN);
+ if (rc != 0) return NULL;
+ rc = uv_ip4_addr(name, port_, storage->addr_in());
+ } else if (family_ == IPv6) {
+ char name[INET6_ADDRSTRLEN + 1] = { '\0' };
+ rc = uv_inet_ntop(AF_INET6, hostname_or_address_.data(), name, INET6_ADDRSTRLEN);
+ if (rc != 0) return NULL;
+ rc = uv_ip6_addr(name, port_, storage->addr_in6());
+ } else {
+ return NULL;
}
- return 0;
+ if (rc != 0) return NULL;
+ return storage->addr();
}
-namespace datastax { namespace internal { namespace core {
-
-bool determine_address_for_peer_host(const Address& connected_address, const Value* peer_value,
- const Value* rpc_value, Address* output) {
- Address peer_address;
- if (!peer_value ||
- !peer_value->decoder().as_inet(peer_value->size(), connected_address.port(), &peer_address)) {
- LOG_WARN("Invalid address format for peer address");
- return false;
- }
- if (rpc_value && !rpc_value->is_null()) {
- if (!rpc_value->decoder().as_inet(rpc_value->size(), connected_address.port(), output)) {
- LOG_WARN("Invalid address format for rpc address");
- return false;
- }
- if (connected_address == *output || connected_address == peer_address) {
- LOG_DEBUG("system.peers on %s contains a line with rpc_address for itself. "
- "This is not normal, but is a known problem for some versions of DSE. "
- "Ignoring this entry.",
- connected_address.to_string(false).c_str());
- return false;
- }
- if (Address::BIND_ANY_IPV4.compare(*output, false) == 0 ||
- Address::BIND_ANY_IPV6.compare(*output, false) == 0) {
- LOG_WARN("Found host with 'bind any' for rpc_address; using listen_address (%s) to contact "
- "instead. "
- "If this is incorrect you should configure a specific interface for rpc_address on "
- "the server.",
- peer_address.to_string(false).c_str());
- *output = peer_address;
- }
+String Address::to_string(bool with_port) const {
+ OStringStream ss;
+ if (family_ == IPv6 && with_port) {
+ ss << "[" << hostname_or_address() << "]";
} else {
- LOG_WARN("No rpc_address for host %s in system.peers on %s. "
- "Ignoring this entry.",
- peer_address.to_string(false).c_str(), connected_address.to_string(false).c_str());
- return false;
+ ss << hostname_or_address();
}
- return true;
+ if (with_port) {
+ ss << ":" << port_;
+ }
+ if (!server_name_.empty()) {
+ ss << " (" << server_name_ << ")";
+ }
+ return ss.str();
}
+namespace datastax { namespace internal { namespace core {
+
String determine_listen_address(const Address& address, const Row* row) {
const Value* v = row->get_by_name("peer");
if (v != NULL) {
diff --git a/src/address.hpp b/src/address.hpp
index 77aaa6744..969ad03ad 100644
--- a/src/address.hpp
+++ b/src/address.hpp
@@ -18,130 +18,146 @@
#define DATASTAX_INTERNAL_ADDRESS_HPP
#include "allocated.hpp"
+#include "callback.hpp"
#include "dense_hash_set.hpp"
-#include "hash.hpp"
#include "string.hpp"
#include "vector.hpp"
-#include
-#include
#include
namespace datastax { namespace internal { namespace core {
class Row;
-class Value;
class Address : public Allocated {
public:
static const Address EMPTY_KEY;
static const Address DELETED_KEY;
- static const Address BIND_ANY_IPV4;
- static const Address BIND_ANY_IPV6;
+ enum Family { UNRESOLVED, IPv4, IPv6 };
- Address();
- Address(const String& ip, int port); // Tests only
+#ifdef _WIN32
+ struct SocketStorage {
+ struct sockaddr* addr() {
+ return reinterpret_cast(&storage);
+ }
+ struct sockaddr_in* addr_in() {
+ return reinterpret_cast(&storage);
+ }
+ struct sockaddr_in6* addr_in6() {
+ return reinterpret_cast(&storage);
+ }
+ struct sockaddr_storage storage;
+ };
+#else
+ struct SocketStorage {
+ struct sockaddr* addr() {
+ return &storage.addr;
+ }
+ struct sockaddr_in* addr_in() {
+ return &storage.addr_in;
+ }
+ struct sockaddr_in6* addr_in6() {
+ return &storage.addr_in6;
+ }
+ union {
+ struct sockaddr addr;
+ struct sockaddr_in addr_in;
+ struct sockaddr_in6 addr_in6;
+ } storage;
+ };
+#endif
- static bool from_string(const String& ip, int port, Address* output = NULL);
+ Address();
+ Address(const Address& other, const String& server_name);
+ Address(const String& hostname_or_address, int port, const String& server_name = String());
+ Address(const uint8_t* address, uint8_t address_length, int port);
+ Address(const struct sockaddr* addr);
- static bool from_inet(const void* data, size_t size, int port, Address* output = NULL);
+ bool equals(const Address& other, bool with_port = true) const;
- bool init(const struct sockaddr* addr);
+ bool operator==(const Address& other) const { return equals(other); }
+ bool operator!=(const Address& other) const { return !equals(other); }
+ bool operator<(const Address& other) const;
-#ifdef _WIN32
- const struct sockaddr* addr() const { return reinterpret_cast(&addr_); }
- const struct sockaddr_in* addr_in() const {
- return reinterpret_cast(&addr_);
- }
- const struct sockaddr_in6* addr_in6() const {
- return reinterpret_cast(&addr_);
- }
-#else
- const struct sockaddr* addr() const { return &addr_; }
- const struct sockaddr_in* addr_in() const { return &addr_in_; }
- const struct sockaddr_in6* addr_in6() const { return &addr_in6_; }
-#endif
+public:
+ String hostname_or_address() const;
+ const String& server_name() const { return server_name_; }
+ Family family() const { return family_; }
+ int port() const { return port_; }
- bool is_valid() const { return family() == AF_INET || family() == AF_INET6; }
- int family() const { return addr()->sa_family; }
- int port() const;
+ bool is_valid() const { return !hostname_or_address_.empty(); }
+ bool is_resolved() const { return family_ == IPv4 || family_ == IPv6; }
+ bool is_valid_and_resolved() const { return is_valid() && is_resolved(); }
+public:
+ size_t hash_code() const;
+ uint8_t to_inet(void* address) const;
+ const struct sockaddr* to_sockaddr(SocketStorage* storage) const;
String to_string(bool with_port = false) const;
- uint8_t to_inet(uint8_t* data) const;
-
- int compare(const Address& a, bool with_port = true) const;
private:
- void init() { addr()->sa_family = AF_UNSPEC; }
- void init(const struct sockaddr_in* addr);
- void init(const struct sockaddr_in6* addr);
+ String hostname_or_address_;
+ String server_name_;
+ Family family_;
+ int port_;
+};
-#ifdef _WIN32
- struct sockaddr* addr() {
- return reinterpret_cast(&addr_);
- }
- struct sockaddr_in* addr_in() {
- return reinterpret_cast(&addr_);
- }
- struct sockaddr_in6* addr_in6() {
- return reinterpret_cast(&addr_);
- }
+String determine_listen_address(const Address& address, const Row* row);
- struct sockaddr_storage addr_;
-#else
- struct sockaddr* addr() {
- return &addr_;
- }
- struct sockaddr_in* addr_in() {
- return &addr_in_;
- }
- struct sockaddr_in6* addr_in6() {
- return &addr_in6_;
- }
+}}} // namespace datastax::internal::core
- union {
- struct sockaddr addr_;
- struct sockaddr_in addr_in_;
- struct sockaddr_in6 addr_in6_;
- };
+namespace std {
+
+#if defined(HASH_IN_TR1) && !defined(_WIN32)
+namespace tr1 {
#endif
+
+template <>
+struct hash<datastax::internal::core::Address> {
+ size_t operator()(const datastax::internal::core::Address& address) const {
+ return address.hash_code();
+ }
};
-struct AddressHash {
- std::size_t operator()(const Address& a) const {
- if (a.family() == AF_INET) {
- return hash::fnv1a(reinterpret_cast(a.addr()), sizeof(struct sockaddr_in));
- } else if (a.family() == AF_INET6) {
- return hash::fnv1a(reinterpret_cast(a.addr()), sizeof(struct sockaddr_in6));
- }
- return 0;
+template <>
+struct hash<datastax::internal::core::Address::Family> {
+ size_t operator()(datastax::internal::core::Address::Family family) const {
+ return hasher(static_cast<int>(family));
}
+ SPARSEHASH_HASH<int> hasher;
};
-typedef Vector AddressVec;
-class AddressSet : public DenseHashSet {
+#if defined(HASH_IN_TR1) && !defined(_WIN32)
+} // namespace tr1
+#endif
+
+} // namespace std
+
+namespace datastax { namespace internal { namespace core {
+
+class AddressSet : public DenseHashSet<Address> {
public:
AddressSet() {
set_empty_key(Address::EMPTY_KEY);
set_deleted_key(Address::DELETED_KEY);
}
};
+typedef Vector<Address> AddressVec;
-inline bool operator<(const Address& a, const Address& b) { return a.compare(b) < 0; }
-
-inline bool operator==(const Address& a, const Address& b) { return a.compare(b) == 0; }
+}}} // namespace datastax::internal::core
-inline bool operator!=(const Address& a, const Address& b) { return a.compare(b) != 0; }
+namespace std {
-inline std::ostream& operator<<(std::ostream& os, const Address& addr) {
- return os << addr.to_string();
+inline std::ostream& operator<<(std::ostream& os, const datastax::internal::core::Address& a) {
+ return os << a.to_string();
}
-inline std::ostream& operator<<(std::ostream& os, const AddressVec& v) {
+inline std::ostream& operator<<(std::ostream& os, const datastax::internal::core::AddressVec& v) {
os << "[";
bool first = true;
- for (AddressVec::const_iterator it = v.begin(), end = v.end(); it != end; ++it) {
+ for (datastax::internal::core::AddressVec::const_iterator it = v.begin(), end = v.end();
+ it != end; ++it) {
if (!first) os << ", ";
first = false;
os << *it;
@@ -150,11 +166,6 @@ inline std::ostream& operator<<(std::ostream& os, const AddressVec& v) {
return os;
}
-bool determine_address_for_peer_host(const Address& connected_address, const Value* peer_value,
- const Value* rpc_value, Address* output);
-
-String determine_listen_address(const Address& address, const Row* row);
-
-}}} // namespace datastax::internal::core
+} // namespace std
#endif
diff --git a/src/address_factory.cpp b/src/address_factory.cpp
new file mode 100644
index 000000000..9ec274097
--- /dev/null
+++ b/src/address_factory.cpp
@@ -0,0 +1,80 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "address_factory.hpp"
+
+#include "row.hpp"
+
+using namespace datastax::internal::core;
+
+bool DefaultAddressFactory::create(const Row* peers_row, const Host::Ptr& connected_host,
+ Address* output) {
+ Address connected_address = connected_host->address();
+ const Value* peer_value = peers_row->get_by_name("peer");
+ const Value* rpc_value = peers_row->get_by_name("rpc_address");
+
+ Address peer_address;
+ if (!peer_value ||
+ !peer_value->decoder().as_inet(peer_value->size(), connected_address.port(), &peer_address)) {
+ LOG_WARN("Invalid address format for peer address");
+ return false;
+ }
+ if (rpc_value && !rpc_value->is_null()) {
+ if (!rpc_value->decoder().as_inet(rpc_value->size(), connected_address.port(), output)) {
+ LOG_WARN("Invalid address format for rpc address");
+ return false;
+ }
+ if (connected_address == *output || connected_address == peer_address) {
+ LOG_DEBUG("system.peers on %s contains a line with rpc_address for itself. "
+ "This is not normal, but is a known problem for some versions of DSE. "
+ "Ignoring this entry.",
+ connected_address.to_string(false).c_str());
+ return false;
+ }
+ if (Address("0.0.0.0", 0).equals(*output, false) || Address("::", 0).equals(*output, false)) {
+ LOG_WARN("Found host with 'bind any' for rpc_address; using listen_address (%s) to contact "
+ "instead. If this is incorrect you should configure a specific interface for "
+ "rpc_address on the server.",
+ peer_address.to_string(false).c_str());
+ *output = peer_address;
+ }
+ } else {
+ LOG_WARN("No rpc_address for host %s in system.peers on %s. Ignoring this entry.",
+ peer_address.to_string(false).c_str(), connected_address.to_string(false).c_str());
+ return false;
+ }
+ return true;
+}
+
+bool SniAddressFactory::create(const Row* peers_row, const Host::Ptr& connected_host,
+ Address* output) {
+ CassUuid host_id;
+ if (!peers_row->get_uuid_by_name("host_id", &host_id)) {
+ // Attempt to get a peer address for the error log.
+ Address peer_address;
+ const Value* peer_value = peers_row->get_by_name("peer");
+ if (!peer_value || !peer_value->decoder().as_inet(
+ peer_value->size(), connected_host->address().port(), &peer_address)) {
+ LOG_WARN("Invalid address format for peer address");
+ }
+ LOG_ERROR("Invalid `host_id` for host. %s will be ignored.",
+ peer_address.is_valid() ? peer_address.to_string().c_str() : "");
+ return false;
+ }
+ *output = Address(connected_host->address().hostname_or_address(),
+ connected_host->address().port(), to_string(host_id));
+ return true;
+}
diff --git a/src/address_factory.hpp b/src/address_factory.hpp
new file mode 100644
index 000000000..1310d5d14
--- /dev/null
+++ b/src/address_factory.hpp
@@ -0,0 +1,63 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef DATASTAX_INTERNAL_ADDRESS_FACTORY_HPP
+#define DATASTAX_INTERNAL_ADDRESS_FACTORY_HPP
+
+#include "config.hpp"
+#include "host.hpp"
+#include "ref_counted.hpp"
+
+namespace datastax { namespace internal { namespace core {
+
+class Row;
+
+/**
+ * An interface for constructing `Address` from `system.local`/`system.peers` row data.
+ */
+class AddressFactory : public RefCounted<AddressFactory> {
+public:
+ typedef SharedRefPtr<AddressFactory> Ptr;
+ virtual ~AddressFactory() {}
+ virtual bool create(const Row* peers_row, const Host::Ptr& connected_host, Address* output) = 0;
+};
+
+/**
+ * An address factory that creates `Address` using the `rpc_address` column.
+ */
+class DefaultAddressFactory : public AddressFactory {
+ virtual bool create(const Row* peers_row, const Host::Ptr& connected_host, Address* output);
+};
+
+/**
+ * An address factory that creates `Address` using the connected host's address and the `host_id`
+ * (for the SNI servername) column.
+ */
+class SniAddressFactory : public AddressFactory {
+ virtual bool create(const Row* peers_row, const Host::Ptr& connected_host, Address* output);
+};
+
+inline AddressFactory* create_address_factory_from_config(const Config& config) {
+ if (config.cloud_secure_connection_config().is_loaded()) {
+ return new SniAddressFactory();
+ } else {
+ return new DefaultAddressFactory();
+ }
+}
+
+}}} // namespace datastax::internal::core
+
+#endif
diff --git a/src/allocated.hpp b/src/allocated.hpp
index 073dcd76e..51bc8fd09 100644
--- a/src/allocated.hpp
+++ b/src/allocated.hpp
@@ -30,6 +30,8 @@ class Allocated {
void* operator new(size_t, void* p) { return p; }
void* operator new[](size_t, void* p) { return p; }
+ void operator delete(void* ptr, void* p) {}
+ void operator delete[](void* ptr, void* p) {}
};
template
diff --git a/src/auth.cpp b/src/auth.cpp
index 6d352b7cf..227e02b39 100644
--- a/src/auth.cpp
+++ b/src/auth.cpp
@@ -24,6 +24,12 @@
using namespace datastax;
using namespace datastax::internal::core;
+using namespace datastax::internal::enterprise;
+
+#define DSE_AUTHENTICATOR "com.datastax.bdp.cassandra.auth.DseAuthenticator"
+
+#define DSE_PLAINTEXT_AUTH_MECHANISM "PLAIN"
+#define DSE_PLAINTEXT_AUTH_SERVER_INITIAL_CHALLENGE "PLAIN-START"
extern "C" {
@@ -95,6 +101,37 @@ bool PlainTextAuthenticator::success(const String& token) {
return true;
}
+bool DsePlainTextAuthenticator::initial_response(String* response) {
+ if (class_name_ == DSE_AUTHENTICATOR) {
+ response->assign(DSE_PLAINTEXT_AUTH_MECHANISM);
+ return true;
+ } else {
+ return evaluate_challenge(DSE_PLAINTEXT_AUTH_SERVER_INITIAL_CHALLENGE, response);
+ }
+}
+
+bool DsePlainTextAuthenticator::evaluate_challenge(const String& token, String* response) {
+ if (token != DSE_PLAINTEXT_AUTH_SERVER_INITIAL_CHALLENGE) {
+ LOG_ERROR("Invalid start token for DSE plaintext authenticator during challenge: '%s'",
+ token.c_str());
+ return false;
+ }
+
+ // Credentials are of the form "<authorization_id>\0<username>\0<password>"
+ response->append(authorization_id_);
+ response->push_back('\0');
+ response->append(username_);
+ response->push_back('\0');
+ response->append(password_);
+
+ return true;
+}
+
+bool DsePlainTextAuthenticator::success(const String& token) {
+ // no-op
+ return true;
+}
+
ExternalAuthenticator::ExternalAuthenticator(const Address& address, const String& hostname,
const String& class_name,
const CassAuthenticatorCallbacks* callbacks,
diff --git a/src/auth.hpp b/src/auth.hpp
index 3f6f4a4d0..0469a7498 100644
--- a/src/auth.hpp
+++ b/src/auth.hpp
@@ -166,6 +166,52 @@ class PlainTextAuthProvider : public AuthProvider {
}}} // namespace datastax::internal::core
+namespace datastax { namespace internal { namespace enterprise {
+
+class DsePlainTextAuthenticator : public core::Authenticator {
+public:
+ DsePlainTextAuthenticator(const String& class_name, const String& username,
+ const String& password, const String& authorization_id)
+ : class_name_(class_name)
+ , username_(username)
+ , password_(password)
+ , authorization_id_(authorization_id) {}
+
+ virtual bool initial_response(String* response);
+ virtual bool evaluate_challenge(const String& token, String* response);
+ virtual bool success(const String& token);
+
+private:
+ String class_name_;
+ String username_;
+ String password_;
+ String authorization_id_;
+};
+
+class DsePlainTextAuthProvider : public core::AuthProvider {
+public:
+ DsePlainTextAuthProvider(const String& username, const String& password,
+ const String& authorization_id)
+ : AuthProvider("DsePlainTextAuthProvider")
+ , username_(username)
+ , password_(password)
+ , authorization_id_(authorization_id) {}
+
+ virtual core::Authenticator::Ptr new_authenticator(const core::Address& address,
+ const String& hostname,
+ const String& class_name) const {
+ return core::Authenticator::Ptr(
+ new DsePlainTextAuthenticator(class_name, username_, password_, authorization_id_));
+ }
+
+private:
+ String username_;
+ String password_;
+ String authorization_id_;
+};
+
+}}} // namespace datastax::internal::enterprise
+
EXTERNAL_TYPE(datastax::internal::core::ExternalAuthenticator, CassAuthenticator)
#endif
diff --git a/src/batch_request.cpp b/src/batch_request.cpp
index ce21cd157..37a94010e 100644
--- a/src/batch_request.cpp
+++ b/src/batch_request.cpp
@@ -130,7 +130,7 @@ int BatchRequest::encode(ProtocolVersion version, RequestCallback* callback,
Buffer buf(buf_size);
size_t pos = buf.encode_byte(0, type_);
- buf.encode_uint16(pos, statements().size());
+ buf.encode_uint16(pos, static_cast<uint16_t>(statements().size()));
bufs->push_back(buf);
length += buf_size;
@@ -183,7 +183,7 @@ int BatchRequest::encode(ProtocolVersion version, RequestCallback* callback,
if (version >= CASS_PROTOCOL_VERSION_V5) {
pos = buf.encode_int32(pos, flags);
} else {
- pos = buf.encode_byte(pos, flags);
+ pos = buf.encode_byte(pos, static_cast<uint8_t>(flags));
}
if (callback->serial_consistency() != 0) {
@@ -195,7 +195,7 @@ int BatchRequest::encode(ProtocolVersion version, RequestCallback* callback,
}
if (version.supports_set_keyspace() && !keyspace().empty()) {
- pos = buf.encode_string(pos, keyspace().data(), keyspace().size());
+ pos = buf.encode_string(pos, keyspace().data(), static_cast<uint16_t>(keyspace().size()));
}
bufs->push_back(buf);
diff --git a/src/batch_request.hpp b/src/batch_request.hpp
index efeff0f4c..fc77953e7 100644
--- a/src/batch_request.hpp
+++ b/src/batch_request.hpp
@@ -33,11 +33,12 @@ class ExecuteRequest;
class BatchRequest : public RoutableRequest {
public:
+ typedef SharedRefPtr<BatchRequest> Ptr;
+ typedef Vector<Statement::Ptr> StatementVec;
- BatchRequest(uint8_t type_)
+ BatchRequest(uint8_t type)
: RoutableRequest(CQL_OPCODE_BATCH)
- , type_(type_) {}
+ , type_(type) {}
uint8_t type() const { return type_; }
diff --git a/src/blacklist_policy.cpp b/src/blacklist_policy.cpp
index c2e0d5aa0..61d367a71 100644
--- a/src/blacklist_policy.cpp
+++ b/src/blacklist_policy.cpp
@@ -19,7 +19,7 @@
using namespace datastax::internal::core;
bool BlacklistPolicy::is_valid_host(const Host::Ptr& host) const {
- const String& host_address = host->address().to_string(false);
+ const String& host_address = host->address().hostname_or_address();
for (ContactPointList::const_iterator it = hosts_.begin(), end = hosts_.end(); it != end; ++it) {
if (host_address.compare(*it) == 0) {
return false;
diff --git a/src/callback.hpp b/src/callback.hpp
index dc8dc3d56..1e59b11f4 100644
--- a/src/callback.hpp
+++ b/src/callback.hpp
@@ -65,7 +65,7 @@ class Callback {
return *this;
}
- operator bool() const { return invoker_; }
+ operator bool() const { return invoker_ != NULL; }
R operator()(const Arg& arg) const { return invoker_->invoke(arg); }
diff --git a/src/cloud_secure_connection_config.cpp b/src/cloud_secure_connection_config.cpp
new file mode 100644
index 000000000..a633e87a1
--- /dev/null
+++ b/src/cloud_secure_connection_config.cpp
@@ -0,0 +1,318 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "cloud_secure_connection_config.hpp"
+
+#include "auth.hpp"
+#include "cluster.hpp"
+#include "cluster_metadata_resolver.hpp"
+#include "config.hpp"
+#include "http_client.hpp"
+#include "json.hpp"
+#include "logger.hpp"
+#include "ssl.hpp"
+#include "utils.hpp"
+
+using namespace datastax;
+using namespace datastax::internal;
+using namespace datastax::internal::core;
+
+#define CLOUD_ERROR "Unable to load cloud secure connection configuration: "
+#define METADATA_SERVER_ERROR "Unable to configure driver from metadata server: "
+
+// Pinned to v1 because that's what the driver currently handles.
+#define METADATA_SERVER_PATH "/metadata?version=1"
+
+#define METADATA_SERVER_PORT 30443
+#define RESPONSE_BODY_TRUNCATE_LENGTH 1024
+
+#ifdef HAVE_ZLIB
+#include "unzip.h"
+
+#define CONFIGURATION_FILE "config.json"
+#define CERTIFICATE_AUTHORITY_FILE "ca.crt"
+#define CERTIFICATE_FILE "cert"
+#define KEY_FILE "key"
+
+class UnzipFile {
+public:
+ UnzipFile()
+ : file(NULL) {}
+
+ ~UnzipFile() { unzClose(file); }
+
+ bool open(const String& filename) { return (file = unzOpen(filename.c_str())) != NULL; }
+
+ bool read_contents(const String& filename, String* contents) {
+ int rc = unzLocateFile(file, filename.c_str(), 0);
+ if (rc != UNZ_OK) {
+ return false;
+ }
+
+ rc = unzOpenCurrentFile(file);
+ if (rc != UNZ_OK) {
+ return false;
+ }
+
+ unz_file_info file_info;
+ rc = unzGetCurrentFileInfo(file, &file_info, 0, 0, 0, 0, 0, 0);
+ if (rc != UNZ_OK) {
+ unzCloseCurrentFile(file);
+ return false;
+ }
+
+ contents->resize(file_info.uncompressed_size, 0);
+ unzReadCurrentFile(file, &(*contents)[0], contents->size());
+ unzCloseCurrentFile(file);
+
+ return true;
+ }
+
+private:
+ unzFile file;
+};
+#endif
+
+namespace {
+
+class CloudClusterMetadataResolver : public ClusterMetadataResolver {
+public:
+ CloudClusterMetadataResolver(const String& host, int port, const SocketSettings& settings,
+ uint64_t request_timeout_ms)
+ : client_(new HttpClient(Address(host, port), METADATA_SERVER_PATH,
+ bind_callback(&CloudClusterMetadataResolver::on_response, this))) {
+ client_->with_settings(settings)->with_request_timeout_ms(request_timeout_ms);
+ }
+
+private:
+ virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) {
+ inc_ref();
+ client_->request(loop);
+ }
+
+ virtual void internal_cancel() { client_->cancel(); }
+
+private:
+ void on_response(HttpClient* http_client) {
+ if (http_client->is_ok()) {
+ if (http_client->content_type().find("json") != std::string::npos) {
+ parse_metadata(http_client->response_body());
+ } else {
+ LOG_ERROR(METADATA_SERVER_ERROR "Invalid response content type: '%s'",
+ http_client->content_type().c_str());
+ }
+ } else if (!http_client->is_canceled()) {
+ if (http_client->is_error_status_code()) {
+ String error_message =
+ http_client->response_body().substr(0, RESPONSE_BODY_TRUNCATE_LENGTH);
+ if (http_client->content_type().find("json") != std::string::npos) {
+ json::Document document;
+ document.Parse(http_client->response_body().c_str());
+ if (document.IsObject() && document.HasMember("message") &&
+ document["message"].IsString()) {
+ error_message = document["message"].GetString();
+ }
+ }
+ LOG_ERROR(METADATA_SERVER_ERROR "Returned error response code %u: '%s'",
+ http_client->status_code(), error_message.c_str());
+ } else {
+ LOG_ERROR(METADATA_SERVER_ERROR "%s", http_client->error_message().c_str());
+ }
+ }
+
+ callback_(this);
+ dec_ref();
+ }
+
+ void parse_metadata(const String& response_body) {
+ json::Document document;
+ document.Parse(response_body.c_str());
+
+ if (!document.IsObject()) {
+ LOG_ERROR(METADATA_SERVER_ERROR "Metadata JSON is invalid");
+ return;
+ }
+
+ if (!document.HasMember("contact_info") || !document["contact_info"].IsObject()) {
+ LOG_ERROR(METADATA_SERVER_ERROR "Contact information is not available");
+ return;
+ }
+
+ const json::Value& contact_info = document["contact_info"];
+
+ if (!contact_info.HasMember("local_dc") || !contact_info["local_dc"].IsString()) {
+ LOG_ERROR(METADATA_SERVER_ERROR "Local DC is not available");
+ return;
+ }
+
+ local_dc_ = contact_info["local_dc"].GetString();
+
+ if (!contact_info.HasMember("sni_proxy_address") ||
+ !contact_info["sni_proxy_address"].IsString()) {
+ LOG_ERROR(METADATA_SERVER_ERROR "SNI proxy address is not available");
+ return;
+ }
+
+ int sni_port = METADATA_SERVER_PORT;
+ Vector<String> tokens;
+ explode(contact_info["sni_proxy_address"].GetString(), tokens, ':');
+ String sni_address = tokens[0];
+ if (tokens.size() == 2) {
+ IStringStream ss(tokens[1]);
+ if ((ss >> sni_port).fail()) {
+ LOG_WARN(METADATA_SERVER_ERROR "Invalid port, default %d will be used",
+ METADATA_SERVER_PORT);
+ }
+ }
+
+ if (!contact_info.HasMember("contact_points") || !contact_info["contact_points"].IsArray()) {
+ LOG_ERROR(METADATA_SERVER_ERROR "Contact points are not available");
+ return;
+ }
+
+ const json::Value& contact_points = contact_info["contact_points"];
+ for (rapidjson::SizeType i = 0; i < contact_points.Size(); ++i) {
+ if (contact_points[i].IsString()) {
+ String host_id = contact_points[i].GetString();
+ resolved_contact_points_.push_back(Address(sni_address, sni_port, host_id));
+ }
+ }
+ }
+
+private:
+ HttpClient::Ptr client_;
+};
+
+class CloudClusterMetadataResolverFactory : public ClusterMetadataResolverFactory {
+public:
+ CloudClusterMetadataResolverFactory(const String& host, int port)
+ : host_(host)
+ , port_(port) {}
+
+ virtual ClusterMetadataResolver::Ptr new_instance(const ClusterSettings& settings) const {
+ return ClusterMetadataResolver::Ptr(new CloudClusterMetadataResolver(
+ host_, port_, settings.control_connection_settings.connection_settings.socket_settings,
+ settings.control_connection_settings.connection_settings.connect_timeout_ms));
+ }
+
+ virtual const char* name() const { return "Cloud"; }
+
+private:
+ String host_;
+ int port_;
+};
+
+} // namespace
+
+CloudSecureConnectionConfig::CloudSecureConnectionConfig()
+ : is_loaded_(false)
+ , port_(0) {}
+
+bool CloudSecureConnectionConfig::load(const String& filename, Config* config /* = NULL */) {
+#ifndef HAVE_ZLIB
+ LOG_ERROR(CLOUD_ERROR "Driver was not built with zlib support");
+ return false;
+#else
+ UnzipFile zip_file;
+ if (!zip_file.open(filename.c_str())) {
+ LOG_ERROR(CLOUD_ERROR "Unable to open zip file %s; file does not exist or is invalid",
+ filename.c_str());
+ return false;
+ }
+
+ String contents;
+ if (!zip_file.read_contents(CONFIGURATION_FILE, &contents)) {
+ LOG_ERROR(CLOUD_ERROR "Missing configuration file %s", CONFIGURATION_FILE);
+ return false;
+ }
+
+ json::MemoryStream memory_stream(contents.c_str(), contents.size());
+ json::AutoUTFMemoryInputStream auto_utf_stream(memory_stream);
+ json::Document document;
+ document.ParseStream(auto_utf_stream);
+ if (!document.IsObject()) {
+ LOG_ERROR(CLOUD_ERROR "Invalid configuration");
+ return false;
+ }
+
+ if (document.HasMember("username") && document["username"].IsString()) {
+ username_ = document["username"].GetString();
+ }
+ if (document.HasMember("password") && document["password"].IsString()) {
+ password_ = document["password"].GetString();
+ }
+
+ if (config && (!username_.empty() || !password_.empty())) {
+ config->set_auth_provider(
+ AuthProvider::Ptr(new enterprise::DsePlainTextAuthProvider(username_, password_, "")));
+ }
+
+ if (!document.HasMember("host") || !document["host"].IsString()) {
+ LOG_ERROR(CLOUD_ERROR "Missing host");
+ return false;
+ }
+ if (!document.HasMember("port") || !document["port"].IsInt()) {
+ LOG_ERROR(CLOUD_ERROR "Missing port");
+ return false;
+ }
+ host_ = document["host"].GetString();
+ port_ = document["port"].GetInt();
+
+ if (!zip_file.read_contents(CERTIFICATE_AUTHORITY_FILE, &ca_cert_)) {
+ LOG_ERROR(CLOUD_ERROR "Missing certificate authority file %s", CERTIFICATE_AUTHORITY_FILE);
+ return false;
+ }
+
+ if (!zip_file.read_contents(CERTIFICATE_FILE, &cert_)) {
+ LOG_ERROR(CLOUD_ERROR "Missing certificate file %s", CERTIFICATE_FILE);
+ return false;
+ }
+
+ if (!zip_file.read_contents(KEY_FILE, &key_)) {
+ LOG_ERROR(CLOUD_ERROR "Missing key file %s", KEY_FILE);
+ return false;
+ }
+
+ if (config) {
+ SslContext::Ptr ssl_context(SslContextFactory::create());
+
+ ssl_context->set_verify_flags(CASS_SSL_VERIFY_PEER_CERT | CASS_SSL_VERIFY_PEER_IDENTITY_DNS);
+
+ if (ssl_context->add_trusted_cert(ca_cert_.c_str(), ca_cert_.length()) != CASS_OK) {
+ LOG_ERROR(CLOUD_ERROR "Invalid CA certificate %s", CERTIFICATE_AUTHORITY_FILE);
+ return false;
+ }
+
+ if (ssl_context->set_cert(cert_.c_str(), cert_.length()) != CASS_OK) {
+ LOG_ERROR(CLOUD_ERROR "Invalid client certificate %s", CERTIFICATE_FILE);
+ return false;
+ }
+
+ if (ssl_context->set_private_key(key_.c_str(), key_.length(), NULL, 0) != CASS_OK) {
+ LOG_ERROR(CLOUD_ERROR "Invalid client private key %s", KEY_FILE);
+ return false;
+ }
+
+ config->set_ssl_context(ssl_context);
+
+ config->set_cluster_metadata_resolver_factory(
+ ClusterMetadataResolverFactory::Ptr(new CloudClusterMetadataResolverFactory(host_, port_)));
+ }
+
+ is_loaded_ = true;
+ return true;
+#endif
+}
diff --git a/src/cloud_secure_connection_config.hpp b/src/cloud_secure_connection_config.hpp
new file mode 100644
index 000000000..8bda008d1
--- /dev/null
+++ b/src/cloud_secure_connection_config.hpp
@@ -0,0 +1,55 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef DATASTAX_INTERNAL_CLOUD_SECURE_CONNECTION_CONFIG_HPP
+#define DATASTAX_INTERNAL_CLOUD_SECURE_CONNECTION_CONFIG_HPP
+
+#include "string.hpp"
+
+namespace datastax { namespace internal { namespace core {
+
+class Config;
+
+class CloudSecureConnectionConfig {
+public:
+ CloudSecureConnectionConfig();
+
+ bool load(const String& filename, Config* config = NULL);
+ bool is_loaded() const { return is_loaded_; }
+
+ const String& username() const { return username_; }
+ const String& password() const { return password_; }
+ const String& host() const { return host_; }
+ int port() const { return port_; }
+
+ const String& ca_cert() const { return ca_cert_; }
+ const String& cert() const { return cert_; }
+ const String& key() const { return key_; }
+
+private:
+ bool is_loaded_;
+ String username_;
+ String password_;
+ String host_;
+ int port_;
+ String ca_cert_;
+ String cert_;
+ String key_;
+};
+
+}}} // namespace datastax::internal::core
+
+#endif
diff --git a/src/cluster.cpp b/src/cluster.cpp
index b6c49e544..f03eaabd0 100644
--- a/src/cluster.cpp
+++ b/src/cluster.cpp
@@ -201,7 +201,8 @@ ClusterSettings::ClusterSettings()
, reconnection_policy(new ExponentialReconnectionPolicy())
, prepare_on_up_or_add_host(CASS_DEFAULT_PREPARE_ON_UP_OR_ADD_HOST)
, max_prepares_per_flush(CASS_DEFAULT_MAX_PREPARES_PER_FLUSH)
- , disable_events_on_startup(false) {
+ , disable_events_on_startup(false)
+ , cluster_metadata_resolver_factory(new DefaultClusterMetadataResolverFactory()) {
load_balancing_policies.push_back(load_balancing_policy);
}
@@ -213,14 +214,15 @@ ClusterSettings::ClusterSettings(const Config& config)
, reconnection_policy(config.reconnection_policy())
, prepare_on_up_or_add_host(config.prepare_on_up_or_add_host())
, max_prepares_per_flush(CASS_DEFAULT_MAX_PREPARES_PER_FLUSH)
- , disable_events_on_startup(false) {}
+ , disable_events_on_startup(false)
+ , cluster_metadata_resolver_factory(config.cluster_metadata_resolver_factory()) {}
Cluster::Cluster(const ControlConnection::Ptr& connection, ClusterListener* listener,
EventLoop* event_loop, const Host::Ptr& connected_host, const HostMap& hosts,
const ControlConnectionSchema& schema,
const LoadBalancingPolicy::Ptr& load_balancing_policy,
- const LoadBalancingPolicy::Vec& load_balancing_policies,
- const ClusterSettings& settings)
+ const LoadBalancingPolicy::Vec& load_balancing_policies, const String& local_dc,
+ const StringMultimap& supported_options, const ClusterSettings& settings)
: connection_(connection)
, listener_(listener ? listener : &nop_cluster_listener__)
, event_loop_(event_loop)
@@ -230,6 +232,8 @@ Cluster::Cluster(const ControlConnection::Ptr& connection, ClusterListener* list
, is_closing_(false)
, connected_host_(connected_host)
, hosts_(hosts)
+ , local_dc_(local_dc)
+ , supported_options_(supported_options)
, is_recording_events_(settings.disable_events_on_startup) {
inc_ref();
connection_->set_listener(this);
@@ -357,7 +361,7 @@ void Cluster::update_schema(const ControlConnectionSchema& schema) {
void Cluster::update_token_map(const HostMap& hosts, const String& partitioner,
const ControlConnectionSchema& schema) {
- if (settings_.control_connection_settings.token_aware_routing && schema.keyspaces) {
+ if (settings_.control_connection_settings.use_token_aware_routing && schema.keyspaces) {
// Create a new token map and populate it
token_map_ = TokenMap::from_partitioner(partitioner);
if (!token_map_) {
diff --git a/src/cluster.hpp b/src/cluster.hpp
index 5fab3f645..4822164d6 100644
--- a/src/cluster.hpp
+++ b/src/cluster.hpp
@@ -213,6 +213,13 @@ struct ClusterSettings {
* started by calling `Cluster::start_events()`.
*/
bool disable_events_on_startup;
+
+ /**
+ * A factory for creating cluster metadata resolvers. A cluster metadata resolver is used to
+ * determine contact points and retrieve other metadata required to connect the
+ * cluster.
+ */
+ ClusterMetadataResolverFactory::Ptr cluster_metadata_resolver_factory;
};
/**
@@ -241,6 +248,9 @@ class Cluster
* @param load_balancing_policy The default load balancing policy to use for
* determining the next control connection host.
* @param load_balancing_policies
+ * @param local_dc The local datacenter determined by the metadata service for initializing the
+ * load balancing policies.
+ * @param supported_options Supported options discovered during control connection.
* @param settings The control connection settings to use for reconnecting the
* control connection.
*/
@@ -248,7 +258,8 @@ class Cluster
EventLoop* event_loop, const Host::Ptr& connected_host, const HostMap& hosts,
const ControlConnectionSchema& schema,
const LoadBalancingPolicy::Ptr& load_balancing_policy,
- const LoadBalancingPolicy::Vec& load_balancing_policies, const ClusterSettings& settings);
+ const LoadBalancingPolicy::Vec& load_balancing_policies, const String& local_dc,
+ const StringMultimap& supported_options, const ClusterSettings& settings);
/**
* Set the listener that will handle events for the cluster
@@ -341,7 +352,9 @@ class Cluster
ProtocolVersion protocol_version() const { return connection_->protocol_version(); }
const Host::Ptr& connected_host() const { return connected_host_; }
const TokenMap::Ptr& token_map() const { return token_map_; }
+ const String& local_dc() const { return local_dc_; }
const VersionNumber& dse_server_version() const { return connection_->dse_server_version(); }
+ const StringMultimap& supported_options() const { return supported_options_; }
private:
friend class ClusterRunClose;
@@ -426,6 +439,8 @@ class Cluster
Metadata metadata_;
PreparedMetadata prepared_metadata_;
TokenMap::Ptr token_map_;
+ String local_dc_;
+ StringMultimap supported_options_;
Timer timer_;
bool is_recording_events_;
ClusterEvent::Vec recorded_events_;
diff --git a/src/cluster_config.cpp b/src/cluster_config.cpp
index 9e1dba162..fda24f978 100644
--- a/src/cluster_config.cpp
+++ b/src/cluster_config.cpp
@@ -17,6 +17,7 @@
#include "cluster_config.hpp"
using namespace datastax;
+using namespace datastax::internal;
using namespace datastax::internal::core;
extern "C" {
@@ -26,13 +27,21 @@ CassCluster* cass_cluster_new() { return CassCluster::to(new ClusterConfig()); }
CassError cass_cluster_set_port(CassCluster* cluster, int port) {
if (port <= 0) {
return CASS_ERROR_LIB_BAD_PARAMS;
+ } else if (cluster->config().cloud_secure_connection_config().is_loaded()) {
+ LOG_ERROR("Port cannot be overridden with cloud secure connection bundle");
+ return CASS_ERROR_LIB_BAD_PARAMS;
}
+
cluster->config().set_port(port);
return CASS_OK;
}
void cass_cluster_set_ssl(CassCluster* cluster, CassSsl* ssl) {
- cluster->config().set_ssl_context(ssl->from());
+ if (cluster->config().cloud_secure_connection_config().is_loaded()) {
+ LOG_ERROR("SSL context cannot be overridden with cloud secure connection bundle");
+ } else {
+ cluster->config().set_ssl_context(ssl->from());
+ }
}
CassError cass_cluster_set_protocol_version(CassCluster* cluster, int protocol_version) {
@@ -100,10 +109,20 @@ CassError cass_cluster_set_contact_points(CassCluster* cluster, const char* cont
CassError cass_cluster_set_contact_points_n(CassCluster* cluster, const char* contact_points,
size_t contact_points_length) {
+ if (cluster->config().cloud_secure_connection_config().is_loaded()) {
+ LOG_ERROR("Contact points cannot be overridden with cloud secure connection bundle");
+ return CASS_ERROR_LIB_BAD_PARAMS;
+ }
+
if (contact_points_length == 0) {
cluster->config().contact_points().clear();
} else {
- explode(String(contact_points, contact_points_length), cluster->config().contact_points());
+ Vector<String> exploded;
+ explode(String(contact_points, contact_points_length), exploded);
+ for (Vector<String>::const_iterator it = exploded.begin(), end = exploded.end(); it != end;
+ ++it) {
+ cluster->config().contact_points().push_back(Address(*it, -1));
+ }
}
return CASS_OK;
}
@@ -393,7 +412,7 @@ CassError cass_cluster_set_use_hostname_resolution(CassCluster* cluster, cass_bo
CassError cass_cluster_set_use_randomized_contact_points(CassCluster* cluster,
cass_bool_t enabled) {
- cluster->config().set_use_randomized_contact_points(enabled);
+ cluster->config().set_use_randomized_contact_points(enabled == cass_true);
return CASS_OK;
}
@@ -448,12 +467,15 @@ CassError cass_cluster_set_local_address(CassCluster* cluster, const char* name)
CassError cass_cluster_set_local_address_n(CassCluster* cluster, const char* name,
size_t name_length) {
- Address address; // default to AF_UNSPEC
- if (name_length == 0 || name == NULL ||
- Address::from_string(String(name, name_length), 0, &address)) {
- cluster->config().set_local_address(address);
+ if (name_length == 0 || name == NULL) {
+ cluster->config().set_local_address(Address());
} else {
- return CASS_ERROR_LIB_HOST_RESOLUTION;
+ Address address(String(name, name_length), 0);
+ if (address.is_valid_and_resolved()) {
+ cluster->config().set_local_address(address);
+ } else {
+ return CASS_ERROR_LIB_HOST_RESOLUTION;
+ }
}
return CASS_OK;
}
@@ -470,6 +492,53 @@ CassError cass_cluster_set_host_listener_callback(CassCluster* cluster,
return CASS_OK;
}
+CassError cass_cluster_set_cloud_secure_connection_bundle(CassCluster* cluster, const char* path) {
+ return cass_cluster_set_cloud_secure_connection_bundle_n(cluster, path, SAFE_STRLEN(path));
+}
+
+CassError cass_cluster_set_cloud_secure_connection_bundle_n(CassCluster* cluster, const char* path,
+ size_t path_length) {
+ if (cluster->config().contact_points().empty() && !cluster->config().ssl_context()) {
+ SslContextFactory::init_once();
+ }
+ return cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init_n(cluster, path,
+ path_length);
+}
+
+CassError cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(CassCluster* cluster,
+ const char* path) {
+ return cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init_n(cluster, path,
+ SAFE_STRLEN(path));
+}
+
+CassError cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init_n(CassCluster* cluster,
+ const char* path,
+ size_t path_length) {
+ const AddressVec& contact_points = cluster->config().contact_points();
+ const SslContext::Ptr& ssl_context = cluster->config().ssl_context();
+ if (!contact_points.empty() || ssl_context) {
+ String message;
+ if (!cluster->config().contact_points().empty()) {
+ message.append("Contact points");
+ }
+ if (cluster->config().ssl_context()) {
+ if (!message.empty()) {
+ message.append(" and ");
+ }
+ message.append("SSL context");
+ }
+ message.append(" must not be specified with cloud secure connection bundle");
+ LOG_ERROR("%s", message.c_str());
+
+ return CASS_ERROR_LIB_BAD_PARAMS;
+ }
+
+ if (!cluster->config().set_cloud_secure_connection_bundle(String(path, path_length))) {
+ return CASS_ERROR_LIB_BAD_PARAMS;
+ }
+ return CASS_OK;
+}
+
void cass_cluster_free(CassCluster* cluster) { delete cluster->from(); }
} // extern "C"
diff --git a/src/cluster_connector.cpp b/src/cluster_connector.cpp
index f13cb4a80..2c194bd21 100644
--- a/src/cluster_connector.cpp
+++ b/src/cluster_connector.cpp
@@ -56,7 +56,7 @@ class RunCancelCluster : public Task {
}}} // namespace datastax::internal::core
-ClusterConnector::ClusterConnector(const ContactPointList& contact_points,
+ClusterConnector::ClusterConnector(const AddressVec& contact_points,
ProtocolVersion protocol_version, const Callback& callback)
: remaining_connector_count_(0)
, contact_points_(contact_points)
@@ -104,32 +104,14 @@ Cluster::Ptr ClusterConnector::release_cluster() {
void ClusterConnector::internal_resolve_and_connect() {
inc_ref();
- if (random_) {
+ if (random_ && !contact_points_.empty()) {
random_shuffle(contact_points_.begin(), contact_points_.end(), random_);
}
- for (ContactPointList::const_iterator it = contact_points_.begin(), end = contact_points_.end();
- it != end; ++it) {
- const String& contact_point = *it;
- Address address;
- // Attempt to parse the contact point string. If it's an IP address
- // then immediately add it to our resolved contact points, otherwise
- // attempt to resolve the string as a hostname.
- if (Address::from_string(contact_point, settings_.port, &address)) {
- contact_points_resolved_.push_back(address);
- } else {
- if (!resolver_) {
- resolver_.reset(new MultiResolver(bind_callback(&ClusterConnector::on_resolve, this)));
- }
- resolver_->resolve(event_loop_->loop(), contact_point, settings_.port,
- settings_.control_connection_settings.connection_settings.socket_settings
- .resolve_timeout_ms);
- }
- }
+ resolver_ = settings_.cluster_metadata_resolver_factory->new_instance(settings_);
- if (!resolver_) {
- internal_connect_all();
- }
+ resolver_->resolve(event_loop_->loop(), contact_points_,
+ bind_callback(&ClusterConnector::on_resolve, this));
}
void ClusterConnector::internal_connect(const Address& address, ProtocolVersion version) {
@@ -142,21 +124,6 @@ void ClusterConnector::internal_connect(const Address& address, ProtocolVersion
->connect(event_loop_->loop());
}
-void ClusterConnector::internal_connect_all() {
- if (contact_points_resolved_.empty()) {
- error_code_ = CLUSTER_ERROR_NO_HOSTS_AVAILABLE;
- error_message_ = "Unable to connect to any contact points";
- finish();
- return;
- }
- remaining_connector_count_ = contact_points_resolved_.size();
- for (AddressVec::const_iterator it = contact_points_resolved_.begin(),
- end = contact_points_resolved_.end();
- it != end; ++it) {
- internal_connect(*it, protocol_version_);
- }
-}
-
void ClusterConnector::internal_cancel() {
error_code_ = CLUSTER_CANCELED;
if (resolver_) resolver_->cancel();
@@ -194,37 +161,28 @@ void ClusterConnector::on_error(ClusterConnector::ClusterError code, const Strin
maybe_finish();
}
-void ClusterConnector::on_resolve(MultiResolver* resolver) {
+void ClusterConnector::on_resolve(ClusterMetadataResolver* resolver) {
if (is_canceled()) {
finish();
return;
}
- const Resolver::Vec& resolvers = resolver->resolvers();
- for (Resolver::Vec::const_iterator it = resolvers.begin(), end = resolvers.end(); it != end;
- ++it) {
- const Resolver::Ptr resolver(*it);
- if (resolver->is_success()) {
- const AddressVec& addresses = resolver->addresses();
- if (!addresses.empty()) {
- for (AddressVec::const_iterator it = addresses.begin(), end = addresses.end(); it != end;
- ++it) {
- contact_points_resolved_.push_back(*it);
- }
- } else {
- LOG_ERROR("No addresses resolved for %s:%d\n", resolver->hostname().c_str(),
- resolver->port());
- }
- } else if (resolver->is_timed_out()) {
- LOG_ERROR("Timed out attempting to resolve address for %s:%d\n", resolver->hostname().c_str(),
- resolver->port());
- } else if (!resolver->is_canceled()) {
- LOG_ERROR("Unable to resolve address for %s:%d\n", resolver->hostname().c_str(),
- resolver->port());
- }
+ const AddressVec& resolved_contact_points(resolver->resolved_contact_points());
+
+ if (resolved_contact_points.empty()) {
+ error_code_ = CLUSTER_ERROR_NO_HOSTS_AVAILABLE;
+ error_message_ = "Unable to connect to any contact points";
+ finish();
+ return;
}
- internal_connect_all();
+ local_dc_ = resolver->local_dc();
+ remaining_connector_count_ = resolved_contact_points.size();
+ for (AddressVec::const_iterator it = resolved_contact_points.begin(),
+ end = resolved_contact_points.end();
+ it != end; ++it) {
+ internal_connect(*it, protocol_version_);
+ }
}
void ClusterConnector::on_connect(ControlConnector* connector) {
@@ -272,7 +230,7 @@ void ClusterConnector::on_connect(ControlConnector* connector) {
for (LoadBalancingPolicy::Vec::const_iterator it = policies.begin(), end = policies.end();
it != end; ++it) {
LoadBalancingPolicy::Ptr policy(*it);
- policy->init(connected_host, hosts, random_);
+ policy->init(connected_host, hosts, random_, local_dc_);
policy->register_handles(event_loop_->loop());
}
@@ -299,7 +257,7 @@ void ClusterConnector::on_connect(ControlConnector* connector) {
cluster_.reset(new Cluster(connector->release_connection(), listener_, event_loop_,
connected_host, hosts, connector->schema(), default_policy, policies,
- settings_));
+ local_dc_, connector->supported_options(), settings_));
// Clear any connection errors and set the final negotiated protocol version.
error_code_ = CLUSTER_OK;
diff --git a/src/cluster_connector.hpp b/src/cluster_connector.hpp
index 70d943ad4..e960fa058 100644
--- a/src/cluster_connector.hpp
+++ b/src/cluster_connector.hpp
@@ -19,6 +19,7 @@
#include "callback.hpp"
#include "cluster.hpp"
+#include "cluster_metadata_resolver.hpp"
#include "resolver.hpp"
namespace datastax { namespace internal {
@@ -60,7 +61,7 @@ class ClusterConnector : public RefCounted {
* @param callback A callback that is called when a connection to a contact
* point is established, if an error occurred, or all contact points failed.
*/
- ClusterConnector(const ContactPointList& contact_points, ProtocolVersion protocol_version,
+ ClusterConnector(const AddressVec& contact_points, ProtocolVersion protocol_version,
const Callback& callback);
/**
@@ -138,18 +139,17 @@ class ClusterConnector : public RefCounted {
private:
void internal_resolve_and_connect();
void internal_connect(const Address& address, ProtocolVersion version);
- void internal_connect_all();
void internal_cancel();
void finish();
void maybe_finish();
void on_error(ClusterError code, const String& message);
- void on_resolve(MultiResolver* resolver);
+ void on_resolve(ClusterMetadataResolver* resolver);
void on_connect(ControlConnector* connector);
private:
-  class ConnectorMap : public DenseHashMap<Address, ControlConnector::Ptr> {
+  class ConnectorMap : public DenseHashMap<Address, ControlConnector::Ptr, AddressHash> {
public:
ConnectorMap() {
set_empty_key(Address::EMPTY_KEY);
@@ -159,16 +159,16 @@ class ClusterConnector : public RefCounted {
private:
Cluster::Ptr cluster_;
- MultiResolver::Ptr resolver_;
+ ClusterMetadataResolver::Ptr resolver_;
ConnectorMap connectors_;
size_t remaining_connector_count_;
- ContactPointList contact_points_;
- AddressVec contact_points_resolved_;
+ AddressVec contact_points_;
ProtocolVersion protocol_version_;
ClusterListener* listener_;
EventLoop* event_loop_;
Random* random_;
Metrics* metrics_;
+ String local_dc_;
ClusterSettings settings_;
Callback callback_;
diff --git a/src/cluster_metadata_resolver.cpp b/src/cluster_metadata_resolver.cpp
new file mode 100644
index 000000000..78ef0c70d
--- /dev/null
+++ b/src/cluster_metadata_resolver.cpp
@@ -0,0 +1,106 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include "cluster_metadata_resolver.hpp"
+
+#include "cluster.hpp"
+#include "logger.hpp"
+
+using namespace datastax::internal::core;
+
+namespace {
+
+class DefaultClusterMetadataResolver : public ClusterMetadataResolver {
+public:
+ DefaultClusterMetadataResolver(uint64_t resolve_timeout_ms, int port)
+ : resolve_timeout_ms_(resolve_timeout_ms)
+ , port_(port) {}
+
+private:
+ virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) {
+ inc_ref();
+
+ for (AddressVec::const_iterator it = contact_points.begin(), end = contact_points.end();
+ it != end; ++it) {
+ // If the port is not set then use the default port value.
+ int port = it->port() <= 0 ? port_ : it->port();
+
+ if (it->is_resolved()) {
+ resolved_contact_points_.push_back(Address(it->hostname_or_address(), port));
+ } else {
+ if (!resolver_) {
+ resolver_.reset(
+ new MultiResolver(bind_callback(&DefaultClusterMetadataResolver::on_resolve, this)));
+ }
+ resolver_->resolve(loop, it->hostname_or_address(), port, resolve_timeout_ms_);
+ }
+ }
+
+ if (!resolver_) {
+ callback_(this);
+ dec_ref();
+ return;
+ }
+ }
+
+ virtual void internal_cancel() {
+ if (resolver_) resolver_->cancel();
+ }
+
+private:
+ void on_resolve(MultiResolver* resolver) {
+ const Resolver::Vec& resolvers = resolver->resolvers();
+ for (Resolver::Vec::const_iterator it = resolvers.begin(), end = resolvers.end(); it != end;
+ ++it) {
+ const Resolver::Ptr resolver(*it);
+ if (resolver->is_success()) {
+ const AddressVec& addresses = resolver->addresses();
+ if (!addresses.empty()) {
+ for (AddressVec::const_iterator it = addresses.begin(), end = addresses.end(); it != end;
+ ++it) {
+ resolved_contact_points_.push_back(*it);
+ }
+ } else {
+ LOG_ERROR("No addresses resolved for %s:%d\n", resolver->hostname().c_str(),
+ resolver->port());
+ }
+ } else if (resolver->is_timed_out()) {
+ LOG_ERROR("Timed out attempting to resolve address for %s:%d\n",
+ resolver->hostname().c_str(), resolver->port());
+ } else if (!resolver->is_canceled()) {
+ LOG_ERROR("Unable to resolve address for %s:%d\n", resolver->hostname().c_str(),
+ resolver->port());
+ }
+ }
+
+ callback_(this);
+ dec_ref();
+ }
+
+private:
+ MultiResolver::Ptr resolver_;
+ const uint64_t resolve_timeout_ms_;
+ const int port_;
+};
+
+} // namespace
+
+ClusterMetadataResolver::Ptr
+DefaultClusterMetadataResolverFactory::new_instance(const ClusterSettings& settings) const {
+ return ClusterMetadataResolver::Ptr(new DefaultClusterMetadataResolver(
+ settings.control_connection_settings.connection_settings.socket_settings.resolve_timeout_ms,
+ settings.port));
+}
diff --git a/src/cluster_metadata_resolver.hpp b/src/cluster_metadata_resolver.hpp
new file mode 100644
index 000000000..90e91acbd
--- /dev/null
+++ b/src/cluster_metadata_resolver.hpp
@@ -0,0 +1,90 @@
+/*
+ Copyright (c) DataStax, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#ifndef DATASTAX_INTERNAL_CLUSTER_METADATA_RESOLVER_HPP
+#define DATASTAX_INTERNAL_CLUSTER_METADATA_RESOLVER_HPP
+
+#include "address.hpp"
+#include "allocated.hpp"
+#include "callback.hpp"
+#include "ref_counted.hpp"
+#include "resolver.hpp"
+
+#include <uv.h>
+
+namespace datastax { namespace internal { namespace core {
+
+struct ClusterSettings;
+
+/**
+ * An abstract class for resolving contact points and other cluster metadata.
+ */
+class ClusterMetadataResolver : public RefCounted<ClusterMetadataResolver> {
+public:
+  typedef SharedRefPtr<ClusterMetadataResolver> Ptr;
+  typedef internal::Callback<void, ClusterMetadataResolver*> Callback;
+
+ virtual ~ClusterMetadataResolver() {}
+
+ void resolve(uv_loop_t* loop, const AddressVec& contact_points, const Callback& callback) {
+ callback_ = callback;
+ internal_resolve(loop, contact_points);
+ }
+
+ virtual void cancel() { internal_cancel(); }
+
+ const AddressVec& resolved_contact_points() const { return resolved_contact_points_; }
+ const String& local_dc() const { return local_dc_; }
+
+protected:
+ virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) = 0;
+
+ virtual void internal_cancel() = 0;
+
+protected:
+ AddressVec resolved_contact_points_;
+ String local_dc_;
+ Callback callback_;
+};
+
+/**
+ * A interface for constructing instances of `ClusterMetadataResolver`s. The factory's instance
+ * creation method is passed the cluster settings object to allow cluster metadata resolvers to
+ * configure themselves with appropriate settings.
+ */
+class ClusterMetadataResolverFactory : public RefCounted<ClusterMetadataResolverFactory> {
+public:
+ typedef SharedRefPtr