diff --git a/.build.linux.sh b/.build.linux.sh index 7387ce3a6..80529e00d 100644 --- a/.build.linux.sh +++ b/.build.linux.sh @@ -54,6 +54,10 @@ install_openssl() { true # Already installed on image } +install_zlib() { + true # Already installed on image +} + install_driver() {( cd packaging diff --git a/.build.osx.sh b/.build.osx.sh index 196aa8260..e88d8dfe4 100644 --- a/.build.osx.sh +++ b/.build.osx.sh @@ -39,6 +39,16 @@ install_openssl() { fi } +install_zlib() { + if brew ls --versions zlib > /dev/null; then + if ! brew outdated zlib; then + brew upgrade zlib + fi + else + brew install zlib + fi +} + install_driver() { true } diff --git a/.build.sh b/.build.sh index 1f9c3fcdb..8c22501a0 100644 --- a/.build.sh +++ b/.build.sh @@ -35,8 +35,8 @@ else fi get_driver_version() { - local header_file=$1 - local driver_prefix=$2 + local header_file=${1} + local driver_prefix=${2} local driver_version=$(grep "#define[ \t]\+${driver_prefix}_VERSION_\(MAJOR\|MINOR\|PATCH\|SUFFIX\)" ${header_file} | awk ' BEGIN { major="?"; minor="?"; patch="?" } /_VERSION_MAJOR/ { major=$3 } @@ -61,10 +61,11 @@ get_driver_version() { install_dependencies() { install_libuv install_openssl + install_zlib } build_driver() { - local driver_prefix=$1 + local driver_prefix=${1} # Ensure build directory is cleaned (static nodes are not cleaned) [[ -d build ]] && rm -rf build @@ -72,7 +73,17 @@ build_driver() { ( cd build - cmake -DCMAKE_BUILD_TYPE=Release -D${driver_prefix}_BUILD_SHARED=On -D${driver_prefix}_BUILD_STATIC=On -D${driver_prefix}_BUILD_EXAMPLES=On -D${driver_prefix}_BUILD_UNIT_TESTS=On .. + BUILD_INTEGRATION_TESTS=Off + if [ "${CI_INTEGRATION_ENABLED}" == "true" ]; then + BUILD_INTEGRATION_TESTS=On + fi + cmake -DCMAKE_BUILD_TYPE=Release \ + -D${driver_prefix}_BUILD_SHARED=On \ + -D${driver_prefix}_BUILD_STATIC=On \ + -D${driver_prefix}_BUILD_EXAMPLES=On \ + -D${driver_prefix}_BUILD_UNIT_TESTS=On \ + -D${driver_prefix}_BUILD_INTEGRATION_TESTS=${BUILD_INTEGRATION_TESTS} \ + .. 
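For local reproduction, the configure step above expands roughly as follows for the core driver; this is a sketch that assumes driver_prefix=CASS and an integration-enabled run (CI_INTEGRATION_ENABLED=true), with option names taken from the script and the test CMake files:

    # Sketch: expanded configure step for the core (CASS) driver with integration tests enabled
    mkdir -p build && cd build
    cmake -DCMAKE_BUILD_TYPE=Release \
          -DCASS_BUILD_SHARED=On \
          -DCASS_BUILD_STATIC=On \
          -DCASS_BUILD_EXAMPLES=On \
          -DCASS_BUILD_UNIT_TESTS=On \
          -DCASS_BUILD_INTEGRATION_TESTS=On \
          ..
    make -j"$(nproc)"   # the script uses ${PROCS}; nproc is an assumption for local use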
[[ -x $(which clang-format) ]] && make format-check make -j${PROCS} ) @@ -80,7 +91,7 @@ build_driver() { check_driver_exports() {( set +e #Disable fail fast for this subshell - local driver_library=$1 + local driver_library=${1} if [ -f ${driver_library} ]; then declare -a MISSING_FUNCTIONS for function in "${@:2}"; do diff --git a/CHANGELOG.md b/CHANGELOG.md index 6482461eb..09b45c3d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,61 @@ +2.14.0 +=========== + +Bug Fixes +-------- +* [CPP-819] - Ensure port is updated on already assigned contact points +* [CPP-825] - Cloud should be verifying the peer certificates CN + +2.14.0-alpha2 +=========== + +Features +-------- +* [CPP-812] - Enable warnings for implicit casts and fix problems +* [CPP-813] - Detect CaaS and change consistency default +* [CPP-817] - Provide error if mixed usage of secure connect bundle and contact points/ssl context + +Bug Fixes +-------- +* [CPP-802] - Handle prepared id mismatch when repreparing on the fly +* [CPP-815] - Schema agreement fails with SNI +* [CPP-811] - Requests won't complete if they exceed the number of streams on a connection + +2.14.0-alpha +=========== + +Features +-------- +* [CPP-787] DataStax cloud platform + * [CPP-788] Support SNI at connection level using `host_id` as host name + * [CPP-793] Add SNI support to `SocketConnector` and SSL backend + * [CPP-794] Add domain name resolution to `SocketConnector` + * [CPP-795] Replace `Address` with endpoint or host type on connection path + * [CPP-797] Events need to map from affected node address to `host_id` + * [CPP-800] Node discovery should use the `host_id` (and endpoint address) instead of the + node's rpc_address + * [CPP-790] Configuration API for DBaaS + * [CPP-791] Add creds.zip support for automatic configuration + * [CPP-798] Configure authentication and SSL from secure connection bundle configuration + * [CPP-799] Use metadata service to determine contact points + * [CPP-788] Support SNI at connection level using `host_id` as host name + * [CPP-803] Propagate `local_dc` from `CloudClusterMetadataResolver` to load balancing policies + +Bug Fixes +-------- +* [CPP-786] Fix TLS 1.3 support +* [CPP-806] Fix handling of no contact points + +Other +-------- +* [CPP-796] Correct compiler flags for mixed C and C++ projects + +Community +-------- +* [CPP-754] Broken build with GCC 9 (eevans) +* Add openssl to the required library list in pkg_config file (accelerated) +* Allow random to work with 0 (joeyhub) + 2.13.0 =========== diff --git a/CMakeLists.txt b/CMakeLists.txt index 3c0c828cf..7bc63bb77 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -43,7 +43,7 @@ option(CASS_USE_OPENSSL "Use OpenSSL" ON) option(CASS_USE_STATIC_LIBS "Link static libraries when building executables" OFF) option(CASS_USE_STD_ATOMIC "Use C++11 atomics library" OFF) option(CASS_USE_TCMALLOC "Use tcmalloc" OFF) -option(CASS_USE_ZLIB "Use zlib" OFF) +option(CASS_USE_ZLIB "Use zlib" ON) option(CASS_USE_TIMERFD "Use timerfd (Linux only)" ON) # Handle testing dependencies @@ -108,8 +108,6 @@ CassAddIncludes() CassFindSourceFiles() CassConfigure() -set(TEST_CXX_FLAGS ${CASS_TEST_CXX_FLAGS}) - set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}) set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}) @@ -177,9 +175,6 @@ CassConfigureTests() # no need to update CMakeLists.txt! 
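With CASS_USE_ZLIB now defaulting to ON, zlib support is compiled in whenever CMake can locate the library; a minimal sketch of opting out, or of pointing the build at a custom install (the /opt/zlib path is illustrative; ZLIB_ROOT_DIR is the hint variable referenced in CppDriver.cmake):

    # Disable zlib support explicitly
    cmake -DCASS_USE_ZLIB=Off ..
    # Or hint at a non-standard zlib location (path is an example)
    ZLIB_ROOT_DIR=/opt/zlib cmake -DCASS_USE_ZLIB=On ..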
if(CASS_BUILD_EXAMPLES) - if(CASS_USE_STATIC_LIBS) - set(CASS_EXAMPLE_C_FLAGS "${CASS_EXAMPLE_C_FLAGS} -DCASS_STATIC") - endif() CassBuildExamples("examples") endif() diff --git a/README.md b/README.md index 798625418..d6d869255 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,7 @@ provided with the distribution: * [Reverse DNS] with SSL peer identity verification support * Randomized contact points * [Speculative execution] +* Support for [DataStax Constellation] Cloud Data Platform ## Compatibility @@ -78,7 +79,6 @@ __Disclaimer__: DataStax products do not support big-endian systems. * JIRA: https://datastax-oss.atlassian.net/browse/CPP * Mailing List: https://groups.google.com/a/lists.datastax.com/forum/#!forum/cpp-driver-user -* DataStax Academy via Slack: https://academy.datastax.com/slack ## Feedback Requested @@ -198,6 +198,7 @@ specific language governing permissions and limitations under the License. [ubuntu-16-04-dependencies]: http://downloads.datastax.com/cpp-driver/ubuntu/16.04/dependencies [ubuntu-18-04-dependencies]: http://downloads.datastax.com/cpp-driver/ubuntu/18.04/dependencies [windows-dependencies]: http://downloads.datastax.com/cpp-driver/windows/dependencies +[DataStax Constellation]: https://constellation.datastax.com [Asynchronous API]: http://datastax.github.io/cpp-driver/topics/#futures [Simple]: http://datastax.github.io/cpp-driver/topics/#executing-queries diff --git a/appveyor.ps1 b/appveyor.ps1 index 24771dfda..fb732be8e 100644 --- a/appveyor.ps1 +++ b/appveyor.ps1 @@ -47,6 +47,18 @@ Function Perl-Version-Information { } } +Function CMake-Version-Information { + If (Get-Command "cmake" -ErrorAction SilentlyContinue) { + $temporary_file = New-TemporaryFile + Start-Process -FilePath cmake -ArgumentList "--version" -RedirectStandardOutput $($temporary_file) -Wait -NoNewWindow + $output = Get-Content "$($temporary_file)" -Raw + Write-Host "$($output.Trim())" -BackgroundColor DarkBlue + Remove-Item $temporary_file + } Else { + Write-Host "CMake is not available" -BackgroundColor DarkRed + } +} + Function Build-Configuration-Information { $output = @" Visual Studio: $($Env:CMAKE_GENERATOR.Split(" ")[-2]) [$($Env:CMAKE_GENERATOR.Split(" ")[-1])] @@ -55,6 +67,7 @@ Boost: v$($Env:BOOST_VERSION) libssh2: v$($Env:LIBSSH2_VERSION) libuv: v$($Env:LIBUV_VERSION) OpenSSL: v$(Get-OpenSSL-Version) +zlib: v$($Env:ZLIB_VERSION) Build Number: $($Env:APPVEYOR_BUILD_NUMBER) Branch: $($Env:APPVEYOR_REPO_BRANCH) SHA: $(Get-Commit-Sha) @@ -104,17 +117,15 @@ Function Initialize-Build-Environment { $libuv_version = $Env:LIBUV_VERSION $openssl_version = Get-OpenSSL-Version $Env:OPENSSL_VERSION = $openssl_version + $zlib_version = $Env:ZLIB_VERSION $kerberos_version = "4.1" $bison_version = "2.4.1" $perl_version = "5.26.2.1" # Determine the platform and create associate environment variables - $architecture = "32" - If ($Env:Platform -Like "x64") { - $architecture = "64" - } - $lib_architecture = "lib$($architecture)" - $windows_architecture = "win$($architecture)" + $Env:CMAKE_PLATFORM = $Env:Platform + $lib_architecture = "lib64" + $windows_architecture = "win64" # Determine which header file to use for determine driver version $driver_header_file = "cassandra.h" @@ -149,13 +160,15 @@ Function Initialize-Build-Environment { $Env:LIBUV_ROOT_DIR = "$($dependencies_location_prefix)/libuv-$($libuv_version)" $Env:OPENSSL_BASE_DIR = "$($dependencies_location_prefix)/openssl-$($openssl_version)" $Env:OPENSSL_ROOT_DIR = "$($Env:OPENSSL_BASE_DIR)/shared" + $Env:ZLIB_ROOT_DIR = 
"$($dependencies_location_prefix)/zlib-$($zlib_version)" $Env:DRIVER_INSTALL_DIR = "C:/projects/driver/lib" $Env:DRIVER_ARTIFACTS_DIR = "C:/projects/driver/artifacts" $Env:DRIVER_ARTIFACTS_LOGS_DIR = "$($Env:DRIVER_ARTIFACTS_DIR)/logs" # Generate the environment variables for the third party archives - $Env:LIBUV_ARTIFACT_ARCHIVE = "libuv-$($libuv_version)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" - $Env:OPENSSL_ARTIFACT_ARCHIVE = "openssl-$($openssl_version)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" + $Env:LIBUV_ARTIFACT_ARCHIVE = "libuv-$($libuv_version)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" + $Env:OPENSSL_ARTIFACT_ARCHIVE = "openssl-$($openssl_version)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" + $Env:ZLIB_ARTIFACT_ARCHIVE = "zlib-$($zlib_version)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" # Generate DataStax Enterprise specific environment variables If ($Env:DRIVER_TYPE -Like "dse") { @@ -185,15 +198,15 @@ Function Initialize-Build-Environment { # Generate the archive name for the driver test and examples artifacts $build_version = "$($Env:APPVEYOR_BUILD_NUMBER)-$($Env:APPVEYOR_REPO_BRANCH)" # TODO: Re-enable OpenSSL version appending if multiple OpenSSL versions are enabled - #$Env:DRIVER_ARTIFACT_EXAMPLES_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-examples-openssl-$($Env:OPENSSL_MAJOR_MINOR)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" - #$Env:DRIVER_ARTIFACT_TESTS_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-tests-openssl-$($Env:OPENSSL_MAJOR_MINOR)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" - $Env:DRIVER_ARTIFACT_EXAMPLES_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-examples-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" - $Env:DRIVER_ARTIFACT_TESTS_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-tests-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" + #$Env:DRIVER_ARTIFACT_EXAMPLES_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-examples-openssl-$($Env:OPENSSL_MAJOR_MINOR)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" + #$Env:DRIVER_ARTIFACT_TESTS_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-tests-openssl-$($Env:OPENSSL_MAJOR_MINOR)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" + $Env:DRIVER_ARTIFACT_EXAMPLES_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-examples-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" + $Env:DRIVER_ARTIFACT_TESTS_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-tests-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" # Generate the archive name for the driver packaging # TODO: Re-enable OpenSSL version appending if multiple OpenSSL versions are enabled - #$Env:DRIVER_ARTIFACT_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-openssl-$($Env:OPENSSL_MAJOR_MINOR)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" - $Env:DRIVER_ARTIFACT_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-win$($architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" + #$Env:DRIVER_ARTIFACT_ARCHIVE = 
"$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-openssl-$($Env:OPENSSL_MAJOR_MINOR)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" + $Env:DRIVER_ARTIFACT_ARCHIVE = "$($driver_archive_prefix)-cpp-driver-$($Env:DRIVER_VERSION)-$($windows_architecture)-msvc$($Env:VISUAL_STUDIO_INTERNAL_VERSION).zip" # Generate additional download/install environments for third party build requirements $Env:BISON_BINARIES_ARCHIVE = "bison-$($bison_version)-bin.zip" @@ -304,8 +317,9 @@ Function Install-Driver-Environment { } } - # Display the Perl version information + # Display the Perl and CMake version information Perl-Version-Information + CMake-Version-Information # Determine the location of the CMake modules (external projects) $cmake_modules_dir = "$($Env:APPVEYOR_BUILD_FOLDER -Replace `"\\`", `"/`")/" @@ -314,12 +328,6 @@ Function Install-Driver-Environment { } $cmake_modules_dir += "cmake/modules" - # Determine the CMake generator to utilize - $cmake_generator = $Env:CMAKE_GENERATOR - If ($Env:Platform -Like "x64") { - $cmake_generator += " Win64" - } - # Build and install the dependencies (if needed; cached) $dependencies_build_location_prefix = "C:/projects/dependencies/build/" If (-Not (Test-Path -Path "$($Env:LIBUV_ROOT_DIR)/lib")) { # lib directory checked due to external project being CMake (automatically creates root directory) @@ -342,7 +350,7 @@ add_dependencies(`${PROJECT_NAME} `${LIBUV_LIBRARY_NAME}) $cmakelists_contents | Out-File -FilePath "CMakeLists.txt" -Encoding Utf8 -Force Write-Host "Configuring libuv" - cmake -G "$($cmake_generator)" -DBUILD_SHARED_LIBS=On "-DLIBUV_VERSION=$($Env:LIBUV_VERSION)" "-DLIBUV_INSTALL_PREFIX=$($Env:LIBUV_ROOT_DIR)" . + cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM -DBUILD_SHARED_LIBS=On "-DLIBUV_VERSION=$($Env:LIBUV_VERSION)" "-DLIBUV_INSTALL_PREFIX=$($Env:LIBUV_ROOT_DIR)" . If ($LastExitCode -ne 0) { If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") { Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "libuv Output Log" @@ -395,7 +403,7 @@ add_dependencies(`${PROJECT_NAME} `${OPENSSL_LIBRARY_NAME}) if ("$_" -Like "shared") { $shared_libs = "On" } - cmake -G "$($cmake_generator)" "-DBUILD_SHARED_LIBS=$($shared_libs)" "-DOPENSSL_VERSION=$($Env:OPENSSL_VERSION)" "-DOPENSSL_INSTALL_PREFIX=$($Env:OPENSSL_BASE_DIR)/$_" . + cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM "-DBUILD_SHARED_LIBS=$($shared_libs)" "-DOPENSSL_VERSION=$($Env:OPENSSL_VERSION)" "-DOPENSSL_INSTALL_PREFIX=$($Env:OPENSSL_BASE_DIR)/$_" . 
If ($LastExitCode -ne 0) { If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") { Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "OpenSSL Output Log" @@ -423,6 +431,53 @@ add_dependencies(`${PROJECT_NAME} `${OPENSSL_LIBRARY_NAME}) } } + If (-Not (Test-Path -Path "$($Env:ZLIB_ROOT_DIR)/lib")) { + New-Item -ItemType Directory -Force -Path "$($dependencies_build_location_prefix)/zlib" | Out-Null + Push-Location -Path "$($dependencies_build_location_prefix)/zlib" + + $cmakelists_contents = @" +cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR) +project(zlib) +set(PROJECT_DISPLAY_NAME "AppVeyor CI Build for zlib") +set(PROJECT_MODULE_DIR $cmake_modules_dir) +set(CMAKE_MODULE_PATH `${CMAKE_MODULE_PATH} `${PROJECT_MODULE_DIR}) +include(ExternalProject-zlib) +set(GENERATED_SOURCE_FILE `${CMAKE_CURRENT_BINARY_DIR}/main.cpp) +file(REMOVE `${GENERATED_SOURCE_FILE}) +file(WRITE `${GENERATED_SOURCE_FILE} "int main () { return 0; }") +add_executable(`${PROJECT_NAME} `${GENERATED_SOURCE_FILE}) +add_dependencies(`${PROJECT_NAME} `${ZLIB_LIBRARY_NAME}) +"@ + $cmakelists_contents | Out-File -FilePath "CMakeLists.txt" -Encoding Utf8 -Force + + Write-Host "Configuring zlib" + cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM -DBUILD_SHARED_LIBS=On "-DZLIB_VERSION=$($Env:ZLIB_VERSION)" "-DZLIB_INSTALL_PREFIX=$($Env:ZLIB_ROOT_DIR)" . + If ($LastExitCode -ne 0) { + If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") { + Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "zlib Output Log" + } + If (Test-Path -Path "build/CMakeFiles/CMakeError.log") { + Push-AppveyorArtifact "build/CMakeFiles/CMakeError.log" -DeploymentName "zlib Error Log" + } + Pop-Location + Throw "Failed to configure zlib for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)" + } + Write-Host "Building and Installing zlib" + cmake --build . --config RelWithDebInfo + If ($LastExitCode -ne 0) { + If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") { + Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "zlib Output Log" + } + If (Test-Path -Path "build/CMakeFiles/CMakeError.log") { + Push-AppveyorArtifact "build/CMakeFiles/CMakeError.log" -DeploymentName "zlib Error Log" + } + Pop-Location + Throw "Failed to build zlib for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)" + } + + Pop-Location + } + # Handle installation of DataStax Enterprise dependencies If ($Env:DRIVER_TYPE -Like "dse") { # Determine if Kerberos for Windows should be installed (cached) @@ -476,7 +531,7 @@ add_dependencies(`${PROJECT_NAME} `${BOOST_LIBRARY_NAME}) $cmakelists_contents | Out-File -FilePath "CMakeLists.txt" -Encoding Utf8 -Force Write-Host "Configuring Boost" - cmake -G "$($cmake_generator)" "-DBOOST_VERSION=$($Env:BOOST_VERSION)" "-DBOOST_INSTALL_PREFIX=$($Env:BOOST_ROOT)" . + cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM "-DBOOST_VERSION=$($Env:BOOST_VERSION)" "-DBOOST_INSTALL_PREFIX=$($Env:BOOST_ROOT)" . If ($LastExitCode -ne 0) { If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") { Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "Boost Output Log" @@ -524,7 +579,7 @@ add_dependencies(`${PROJECT_NAME} `${LIBSSH2_LIBRARY_NAME}) $cmakelists_contents | Out-File -FilePath "CMakeLists.txt" -Encoding Utf8 -Force Write-Host "Configuring libssh2" - cmake -G "$($cmake_generator)" "-DLIBSSH2_VERSION=$($Env:LIBSSH2_VERSION)" "-DLIBSSH2_INSTALL_PREFIX=$($Env:LIBSSH2_ROOT_DIR)" . 
+ cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM "-DLIBSSH2_VERSION=$($Env:LIBSSH2_VERSION)" "-DLIBSSH2_INSTALL_PREFIX=$($Env:LIBSSH2_ROOT_DIR)" . If ($LastExitCode -ne 0) { If (Test-Path -Path "build/CMakeFiles/CMakeOutput.log") { Push-AppveyorArtifact "build/CMakeFiles/CMakeOutput.log" -DeploymentName "libssh2 Output Log" @@ -562,16 +617,9 @@ add_dependencies(`${PROJECT_NAME} `${LIBSSH2_LIBRARY_NAME}) } Function Build-Driver { - # Determine the CMake generator to utilize - $cmake_generator = $Env:CMAKE_GENERATOR - If ($Env:Platform -Like "x64") { - $cmake_generator += " Win64" - } - # Ensure Boost atomic is used for Visual Studio 2010 (increased performance) $use_boost_atomic = "Off" - If ($Env:VISUAL_STUDIO_INTERNAL_VERSION -Like "100" -Or - ($Env:VISUAL_STUDIO_INTERNAL_VERSION -Like "110" -And $Env:Platform -Like "x86")) { + If ($Env:VISUAL_STUDIO_INTERNAL_VERSION -Like "100") { $use_boost_atomic = "On" # Enable Boost atomic usage } @@ -583,7 +631,7 @@ Function Build-Driver { New-Item -ItemType Directory -Force -Path "$($Env:APPVEYOR_BUILD_FOLDER)/build" Push-Location "$($Env:APPVEYOR_BUILD_FOLDER)/build" Write-Host "Configuring DataStax C/C++ $($driver_type) Driver" - cmake -G "$($cmake_generator)" "-D$($Env:DRIVER_TYPE)_MULTICORE_COMPILATION=On" "-D$($Env:DRIVER_TYPE)_USE_OPENSSL=On" "-D$($Env:DRIVER_TYPE)_USE_BOOST_ATOMIC=$($use_boost_atomic)" "-D$($Env:DRIVER_TYPE)_BUILD_EXAMPLES=On" "-D$($Env:DRIVER_TYPE)_BUILD_TESTS=On" "-D$($Env:DRIVER_TYPE)_USE_LIBSSH2=On" "-DCMAKE_INSTALL_PREFIX=`"$($Env:DRIVER_INSTALL_DIR)`"" .. + cmake -G "$($Env:CMAKE_GENERATOR)" -A $Env:CMAKE_PLATFORM "-D$($Env:DRIVER_TYPE)_MULTICORE_COMPILATION=On" "-D$($Env:DRIVER_TYPE)_USE_OPENSSL=On" "-D$($Env:DRIVER_TYPE)_USE_ZLIB=On" "-D$($Env:DRIVER_TYPE)_USE_BOOST_ATOMIC=$($use_boost_atomic)" "-D$($Env:DRIVER_TYPE)_BUILD_EXAMPLES=On" "-D$($Env:DRIVER_TYPE)_BUILD_TESTS=On" "-D$($Env:DRIVER_TYPE)_USE_LIBSSH2=On" "-DCMAKE_INSTALL_PREFIX=`"$($Env:DRIVER_INSTALL_DIR)`"" .. 
If ($LastExitCode -ne 0) { Pop-Location Throw "Failed to configure DataStax C/C++ $($driver_type) Driver for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)" @@ -707,6 +755,17 @@ a -tzip "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:OPENSSL_ARTIFACT_ARCHIVE)" -r "$($E If ($process.ExitCode -ne 0) { Throw "Failed to archive OpenSSL for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)" } + + # Clean up the library dependency directories for zlib packaging + New-Item -ItemType Directory -Force -Path "$($Env:DRIVER_ARTIFACTS_DIR)/zlib" | Out-Null + Copy-Item -Force -Recurse -Path "$($Env:ZLIB_ROOT_DIR)/*" "$($Env:DRIVER_ARTIFACTS_DIR)/zlib" | Out-Null + $argument_list = @" +a -tzip "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:ZLIB_ARTIFACT_ARCHIVE)" -r "$($Env:DRIVER_ARTIFACTS_DIR)/zlib/*" +"@ + $process = Start-Process -FilePath 7z -ArgumentList $argument_list -PassThru -Wait -NoNewWindow + If ($process.ExitCode -ne 0) { + Throw "Failed to archive zlib for MSVC $($Env:VISUAL_STUDIO_INTERNAL_VERSION)-$($Env:Platform)" + } } Function Push-Artifacts { @@ -721,6 +780,7 @@ Function Push-Artifacts { Push-AppveyorArtifact "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:DRIVER_ARTIFACT_TESTS_ARCHIVE)" -DeploymentName "DataStax C/C++ $($driver_type) Driver Tests" Push-AppveyorArtifact "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:LIBUV_ARTIFACT_ARCHIVE)" -DeploymentName "libuv v$($Env:LIBUV_VERSION)" Push-AppveyorArtifact "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:OPENSSL_ARTIFACT_ARCHIVE)" -DeploymentName "OpenSSL v$($Env:OPENSSL_VERSION)" + Push-AppveyorArtifact "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:ZLIB_ARTIFACT_ARCHIVE)" -DeploymentName "zlib v$($Env:ZLIB_VERSION)" } } @@ -781,6 +841,8 @@ Function Publish-Artifacts { #TODO: Need to handle OpenSSL v1.1.x if enabled $openssl_uri = "$($base_uri)/dependencies/openssl/v$($Env:OPENSSL_VERSION)/$($Env:OPENSSL_ARTIFACT_ARCHIVE)" $openssl_archive = "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:OPENSSL_ARTIFACT_ARCHIVE)" + $zlib_uri = "$($base_uri)/dependencies/zlib/v$($Env:ZLIB_VERSION)/$($Env:ZLIB_ARTIFACT_ARCHIVE)" + $zlib_archive = "$($Env:DRIVER_ARTIFACTS_DIR)/$($Env:ZLIB_ARTIFACT_ARCHIVE)" # Publish/Upload the driver and it dependencies to Artifactory $is_failure = $False @@ -798,6 +860,10 @@ Function Publish-Artifacts { $is_failure = $True $failed_upload += "OpenSSL" } + If ((Publish-Artifact-To-Artifactory -Uri "$($zlib_uri)" -FilePath "$($zlib_archive)") -ne 0) { + $is_failure = $True + $failed_upload += "zlib" + } # Check to see if there was a failure uploading the artifacts If ($is_failure) { diff --git a/appveyor.yml b/appveyor.yml index 71df5120e..7c69713b0 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -25,7 +25,6 @@ cache: - C:\projects\dependencies\bin -> appveyor.ps1 - C:\projects\dependencies\libs -> appveyor.yml platform: - - x86 - x64 hosts: cpp-driver.hostname.: 127.254.254.254 @@ -36,14 +35,14 @@ environment: secure: PLLc0JCL9I7y8zw8p9meQhxXGAbyWCjyWO17xKOsyxE= ARTIFACTORY_PASSWORD: secure: h28bN22Py3CZPqrWoZWEjIFnpes+kslusCKP1mRYdUqBEf+OO1kFEQTZ9DGD7tuCSIIRDI3Mf9LX8zgUdmdlZA== - APPVEYOR_BUILD_WORKER_CLOUD: gce APPVEYOR_IGNORE_COMMIT_FILTERING_ON_TAG: true DRIVER_TYPE: CASS BOOST_VERSION: 1.69.0 LIBSSH2_VERSION: 1.9.0 - LIBUV_VERSION: 1.29.1 + LIBUV_VERSION: 1.33.0 OPENSSL_1_0_VERSION: 1.0.2s OPENSSL_1_1_VERSION: 1.1.1c + ZLIB_VERSION: 1.2.11 matrix: - CMAKE_GENERATOR: Visual Studio 10 2010 OPENSSL_MAJOR_MINOR: 1.0 @@ -85,6 +84,16 @@ environment: # OPENSSL_MAJOR_MINOR: 1.1 # VISUAL_STUDIO_INTERNAL_VERSION: 141 # APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 + 
- CMAKE_GENERATOR: Visual Studio 16 2019 + OPENSSL_MAJOR_MINOR: 1.0 + VISUAL_STUDIO_INTERNAL_VERSION: 142 + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 + BOOST_VERSION: 1.70.0 +# - CMAKE_GENERATOR: Visual Studio 16 2019 +# OPENSSL_MAJOR_MINOR: 1.1 +# VISUAL_STUDIO_INTERNAL_VERSION: 142 +# APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 +# BOOST_VERSION: 1.70.0 #init: # - ps: iex ((New-Object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1')) install: diff --git a/build.yaml b/build.yaml index 4d3cce80e..699f87268 100644 --- a/build.yaml +++ b/build.yaml @@ -9,6 +9,20 @@ schedules: slack: cpp-driver-dev-bots branches: include: ["/CPP-\\d+/", "master"] + env_vars: | + CI_SCHEDULE=commit + nightly: + schedule: nightly + notify: + slack: cpp-driver-dev-bots + branches: + include: ["/CPP-\\d+/", "master"] + matrix: + exclude: + - os: ['ubuntu/trusty64/cpp', 'ubuntu/xenial64/cpp', 'centos/6-64/cpp', 'centos/7-64/cpp', 'osx/high-sierra'] + env_vars: | + CI_SCHEDULE=nightly + CI_INTEGRATION_ENABLED=true architecture: - x64 os: @@ -19,7 +33,7 @@ os: - centos/7-64/cpp - osx/high-sierra env: - LIBUV_VERSION: 1.29.1 + LIBUV_VERSION: 1.33.0 build: - script: | . .build.sh @@ -33,11 +47,15 @@ build: build/cassandra-unit-tests --gtest_output=xml:cassandra-unit-test-results.xml + if [ -f build/cassandra-integration-tests ]; then + build/cassandra-integration-tests --category=cassandra --keep-clusters --verbose --gtest_filter=DbaasTests* --gtest_output=xml:dbaas-integration-test-results.xml + fi + install_driver test_installed_driver 'cassandra' - xunit: - - "*unit-test-results.xml" + - "*test-results.xml" package: allow_empty: true include: # list of files and glob paths to include in the artifact, relative to the current working directory @@ -46,6 +64,8 @@ release: matrix: exclude: - os: [ osx/high-sierra ] + env_vars: | + CI_SCHEDULE=release after: each: - script: | diff --git a/cmake/modules/CppDriver.cmake b/cmake/modules/CppDriver.cmake index 61f16470a..9ad759a55 100644 --- a/cmake/modules/CppDriver.cmake +++ b/cmake/modules/CppDriver.cmake @@ -138,21 +138,32 @@ endmacro() # Arguments: # prefix - prefix of global variable names that contain specific # info on building the library (e.g. CASS or DSE). 
-# Input: PROJECT_LIB_NAME, PROJECT_VERSION_STRING, PROJECT_VERSION_MAJOR, -# PROJECT_CXX_LINKER_FLAGS, *_DRIVER_CXX_FLAGS -# Output: CASS_INCLUDES and CASS_LIBS +# Input: PROJECT_LIB_NAME, PROJECT_VERSION_STRING, PROJECT_VERSION_MAJOR #------------------------ macro(CassConfigureShared prefix) target_link_libraries(${PROJECT_LIB_NAME} ${${prefix}_LIBS}) set_target_properties(${PROJECT_LIB_NAME} PROPERTIES OUTPUT_NAME ${PROJECT_LIB_NAME}) set_target_properties(${PROJECT_LIB_NAME} PROPERTIES VERSION ${PROJECT_VERSION_STRING} SOVERSION ${PROJECT_VERSION_MAJOR}) - set_target_properties(${PROJECT_LIB_NAME} PROPERTIES LINK_FLAGS "${PROJECT_CXX_LINKER_FLAGS}") set_target_properties(${PROJECT_LIB_NAME} PROPERTIES COMPILE_PDB_NAME "${PROJECT_LIB_NAME}" COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}") - set_property( - TARGET ${PROJECT_LIB_NAME} - APPEND PROPERTY COMPILE_FLAGS "${${prefix}_DRIVER_CXX_FLAGS} -DCASS_BUILDING") + set(STATIC_COMPILE_FLAGS "-D${prefix}_BUILDING") + if("${prefix}" STREQUAL "DSE") + set(STATIC_COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -DCASS_BUILDING") + endif() + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + set_property( + TARGET ${PROJECT_LIB_NAME} + APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -Wconversion -Wno-sign-conversion -Wno-shorten-64-to-32 -Wno-undefined-var-template -Werror") + elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") # To many superfluous warnings generated with GCC when using -Wconversion (see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=40752) + set_property( + TARGET ${PROJECT_LIB_NAME} + APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -Werror") + elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") + set_property( + TARGET ${PROJECT_LIB_NAME} + APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} /we4800") + endif() endmacro() #------------------------ @@ -164,20 +175,38 @@ endmacro() # prefix - prefix of global variable names that contain specific # info on building the library (e.g. CASS or DSE). 
# Input: PROJECT_LIB_NAME_STATIC, PROJECT_VERSION_STRING, PROJECT_VERSION_MAJOR, -# PROJECT_CXX_LINKER_FLAGS, *_DRIVER_CXX_FLAGS -# Output: CASS_INCLUDES and CASS_LIBS +# *_USE_STATIC_LIBS #------------------------ macro(CassConfigureStatic prefix) target_link_libraries(${PROJECT_LIB_NAME_STATIC} ${${prefix}_LIBS}) set_target_properties(${PROJECT_LIB_NAME_STATIC} PROPERTIES OUTPUT_NAME ${PROJECT_LIB_NAME_STATIC}) set_target_properties(${PROJECT_LIB_NAME_STATIC} PROPERTIES VERSION ${PROJECT_VERSION_STRING} SOVERSION ${PROJECT_VERSION_MAJOR}) - set_target_properties(${PROJECT_LIB_NAME_STATIC} PROPERTIES LINK_FLAGS "${PROJECT_CXX_LINKER_FLAGS}") set_target_properties(${PROJECT_LIB_NAME_STATIC} PROPERTIES COMPILE_PDB_NAME "${PROJECT_LIB_NAME_STATIC}" COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}") - set_property( - TARGET ${PROJECT_LIB_NAME_STATIC} - APPEND PROPERTY COMPILE_FLAGS "${${prefix}_DRIVER_CXX_FLAGS} -DCASS_STATIC") + set(STATIC_COMPILE_FLAGS "-D${prefix}_STATIC") + if("${prefix}" STREQUAL "DSE") + set(STATIC_COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -DCASS_STATIC") + endif() + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + set_property( + TARGET ${PROJECT_LIB_NAME_STATIC} + APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -Wconversion -Wno-sign-conversion -Wno-shorten-64-to-32 -Wno-undefined-var-template -Werror") + elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") # To many superfluous warnings generated with GCC when using -Wconversion (see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=40752) + set_property( + TARGET ${PROJECT_LIB_NAME_STATIC} + APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} -Werror") + elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") + set_property( + TARGET ${PROJECT_LIB_NAME_STATIC} + APPEND PROPERTY COMPILE_FLAGS "${STATIC_COMPILE_FLAGS} /we4800") + endif() + + # Update the CXX flags to indicate the use of the static library + if(${prefix}_USE_STATIC_LIBS) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${STATIC_COMPILE_FLAGS}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${STATIC_COMPILE_FLAGS}") + endif() endmacro() #------------------------ @@ -444,6 +473,48 @@ macro(CassRapidJson) include_directories(${RAPID_JSON_INCLUDE_DIR}) endmacro() +#------------------------ +# CassMiniZip +# +# Set some MINIZIP_* variables, set up some source_group's, and add the +# MINIZIP include dir to our list of include dirs. +# +# Input: CASS_SRC_DIR +# Output: MINIZIP_INCLUDE_DIR, MINIZIP_HEADER_FILES, MINIZIP_SOURCE_FILES +#------------------------ +macro(CassMiniZip) + if (ZLIB_FOUND) + set(MINIZIP_INCLUDE_DIR "${CASS_SRC_DIR}/third_party/minizip") + set(MINIZIP_HEADER_FILES ${MINIZIP_INCLUDE_DIR}/crypt.h + ${MINIZIP_INCLUDE_DIR}/ioapi.h + ${MINIZIP_INCLUDE_DIR}/unzip.h) + set(MINIZIP_SOURCE_FILES ${MINIZIP_INCLUDE_DIR}/ioapi.c + ${MINIZIP_INCLUDE_DIR}/unzip.c) + source_group("Header Files\\minizip" FILES ${MINIZIP_HEADER_FILES}) + source_group("Source Files\\minizip" FILES ${MINIZIP_SOURCE_FILES}) + include_directories(${MINIZIP_INCLUDE_DIR}) + endif() +endmacro() + +#------------------------ +# CassHttpParser +# +# Set some HTTP_PARSER_* variables, set up some source_group's, and add the +# HTTP_PARSER include dir to our list of include dirs. 
+# +# Input: CASS_SRC_DIR +# Output: HTTP_PARSER_INCLUDE_DIR, HTTP_PARSER_HEADER_FILES, +# HTTP_PARSER_SOURCE_FILES +#------------------------ +macro(CassHttpParser) + set(HTTP_PARSER_INCLUDE_DIR "${CASS_SRC_DIR}/third_party/http-parser") + set(HTTP_PARSER_HEADER_FILES ${HTTP_PARSER_INCLUDE_DIR}/http_parser.h) + set(HTTP_PARSER_SOURCE_FILES ${HTTP_PARSER_INCLUDE_DIR}/http_parser.c) + source_group("Header Files\\http-parser" FILES ${HTTP_PARSER_HEADER_FILES}) + source_group("Source Files\\http-parser" FILES ${HTTP_PARSER_SOURCE_FILES}) + include_directories(${HTTP_PARSER_INCLUDE_DIR}) +endmacro() + #------------------------ # CassSimulacron # @@ -672,6 +743,7 @@ macro(CassUseZlib) # Assign zlib properties set(CASS_INCLUDES ${CASS_INCLUDES} ${ZLIB_INCLUDE_DIRS}) set(CASS_LIBS ${CASS_LIBS} ${ZLIB_LIBRARIES}) + set(HAVE_ZLIB On) else() message(WARNING "Could not find zlib, try to set the path to zlib root folder in the system variable ZLIB_ROOT_DIR") message(WARNING "zlib libraries will not be linked into build") @@ -694,8 +766,6 @@ endmacro() # # Input: CASS_USE_STD_ATOMIC, CASS_USE_BOOST_ATOMIC, CASS_MULTICORE_COMPILATION # CASS_USE_STATIC_LIBS -# Output: CASS_USE_STD_ATOMIC, CASS_DRIVER_CXX_FLAGS, CASS_TEST_CXX_FLAGS, -# CASS_EXAMPLE_C_FLAGS #------------------------ macro(CassSetCompilerFlags) # Force OLD style of implicitly dereferencing variables @@ -818,7 +888,6 @@ macro(CassSetCompilerFlags) # TODO(mpenick): Fix these "possible loss of data" warnings add_definitions(/wd4244) add_definitions(/wd4267) - add_definitions(/wd4800) # Performance warning due to automatic compiler casting from int to bool # Add preprocessor definitions for proper compilation add_definitions(-D_CRT_SECURE_NO_WARNINGS) # Remove warnings for not using safe functions (TODO: Fix codebase to be more secure for Visual Studio) @@ -826,10 +895,8 @@ macro(CassSetCompilerFlags) add_definitions(-D_SILENCE_TR1_NAMESPACE_DEPRECATION_WARNING) # Remove warnings for TR1 deprecation (Visual Studio 15 2017); caused by sparsehash # Create the project, example, and test flags - set(CASS_DRIVER_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CASS_DRIVER_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}") - set(CASS_EXAMPLE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}") - # Enable bigobj for large object files during compilation (Cassandra types integration test) - set(CASS_TEST_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} ${WARNING_COMPILER_FLAGS} /bigobj") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}") # Assign additional library requirements for Windows set(CASS_LIBS ${CASS_LIBS} iphlpapi psapi wsock32 crypt32 ws2_32 userenv) @@ -846,19 +913,16 @@ macro(CassSetCompilerFlags) # OpenSSL is deprecated on later versions of Mac OS X. The long-term solution # is to provide a CommonCryto implementation. 
if (APPLE AND CASS_USE_OPENSSL) - set(CASS_DRIVER_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} -Wno-deprecated-declarations") - set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} -Wno-deprecated-declarations") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations") endif() # Enable C++11 support to use std::atomic if(CASS_USE_STD_ATOMIC) - set(CASS_DRIVER_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} -std=c++11") - set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} -std=c++11") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") endif() - set(CASS_DRIVER_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CASS_DRIVER_CXX_FLAGS} ${WARNING_COMPILER_FLAGS} -Werror") - set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}") - set(CASS_EXAMPLE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}") elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") # Clang/Intel specific compiler options # I disabled long-long warning because boost generates about 50 such warnings @@ -869,19 +933,16 @@ macro(CassSetCompilerFlags) # OpenSSL is deprecated on later versions of Mac OS X. The long-term solution # is to provide a CommonCryto implementation. if (APPLE AND CASS_USE_OPENSSL) - set(CASS_DRIVER_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} -Wno-deprecated-declarations") - set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} -Wno-deprecated-declarations") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations") endif() # Enable C++11 support to use std::atomic if(CASS_USE_STD_ATOMIC) - set(CASS_DRIVER_CXX_FLAGS "${CASS_DRIVER_CXX_FLAGS} -std=c++11") - set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} -std=c++11") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") endif() - set(CASS_DRIVER_CXX_FLAGS " ${CMAKE_CXX_FLAGS} ${CASS_DRIVER_CXX_FLAGS} ${WARNING_COMPILER_FLAGS} -Werror") - set(CASS_TEST_CXX_FLAGS "${CASS_TEST_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}") - set(CASS_EXAMPLE_C_FLAGS "${CMAKE_C_FLAGS} -std=c89 ${WARNING_COMPILER_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${WARNING_COMPILER_FLAGS}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${WARNING_COMPILER_FLAGS}") else() message(FATAL_ERROR "Unsupported compiler: ${CMAKE_CXX_COMPILER_ID}") endif() @@ -981,6 +1042,14 @@ macro(CassFindSourceFiles) ${CASS_SRC_DIR}/ssl/ssl_no_impl.cpp) endif() + CassMiniZip() + set(CASS_INC_FILES ${CASS_INC_FILES} ${MINIZIP_HEADER_FILES}) + set(CASS_SRC_FILES ${CASS_SRC_FILES} ${MINIZIP_SOURCE_FILES}) + + CassHttpParser() + set(CASS_INC_FILES ${CASS_INC_FILES} ${HTTP_PARSER_HEADER_FILES}) + set(CASS_SRC_FILES ${CASS_SRC_FILES} ${HTTP_PARSER_SOURCE_FILES}) + set(CASS_ALL_SOURCE_FILES ${CASS_SRC_FILES} ${CASS_API_HEADER_FILES} ${CASS_INC_FILES}) endmacro() @@ -1001,6 +1070,7 @@ macro(CassConfigure) else() check_symbol_exists(arc4random_buf "stdlib.h" HAVE_ARC4RANDOM) endif() + # Determine if sigpipe is available check_symbol_exists(SO_NOSIGPIPE "sys/socket.h;sys/types.h" HAVE_NOSIGPIPE) check_symbol_exists(sigtimedwait "signal.h" HAVE_SIGTIMEDWAIT) @@ -1008,7 +1078,6 @@ macro(CassConfigure) message(WARNING "Unable to handle SIGPIPE on your platform") endif() - # Determine if hash is in the tr1 namespace string(REPLACE "::" ";" HASH_NAMESPACE_LIST ${HASH_NAMESPACE}) foreach(NAMESPACE ${HASH_NAMESPACE_LIST}) @@ -1018,7 +1087,7 @@ macro(CassConfigure) endforeach() # Check for GCC compiler builtins - if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + if(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") 
check_cxx_source_compiles("int main() { return __builtin_bswap32(42); }" HAVE_BUILTIN_BSWAP32) check_cxx_source_compiles("int main() { return __builtin_bswap64(42); }" HAVE_BUILTIN_BSWAP64) endif() diff --git a/cmake/modules/ExternalProject-libuv.cmake b/cmake/modules/ExternalProject-libuv.cmake index 1d7f4ed3b..d008138e4 100644 --- a/cmake/modules/ExternalProject-libuv.cmake +++ b/cmake/modules/ExternalProject-libuv.cmake @@ -22,7 +22,7 @@ if(NOT LIBUV_INSTALL_PREFIX) endif() option(LIBUV_VERSION "libuv version to build and install") if(NOT LIBUV_VERSION) - set(LIBUV_VERSION "1.29.1") + set(LIBUV_VERSION "1.32.0") endif() set(LIBUV_VERSION ${LIBUV_VERSION} CACHE STRING "libuv version to build and install" FORCE) diff --git a/docs.yaml b/docs.yaml index bf151fdf4..4508fbfbb 100644 --- a/docs.yaml +++ b/docs.yaml @@ -53,10 +53,13 @@ rewrites: - http://www.datastax.com/documentation/cql/3.1: https://docs.datastax.com/en/archived/cql/3.1 - http://www.datastax.com/documentation/cassandra/2.: https://docs.datastax.com/en/archived/cassandra/2. - http://downloads.datastax.com/cpp-driver: https://downloads.datastax.com/cpp-driver/ + - http://www.datastax.com/dev/blog/datastax-c-driver(.*)$: https://www.datastax.com/blog/ rules: use_path_nav_files_only: true versions: + - name: "2.14" + ref: 2.14.0 - name: "2.13" ref: 2.13.0 - name: "2.12" diff --git a/driver_config.hpp.in b/driver_config.hpp.in index 43d8a6df2..979f783f2 100644 --- a/driver_config.hpp.in +++ b/driver_config.hpp.in @@ -12,5 +12,6 @@ #cmakedefine HAVE_ARC4RANDOM #cmakedefine HAVE_GETRANDOM #cmakedefine HAVE_TIMERFD +#cmakedefine HAVE_ZLIB #endif diff --git a/examples/async/CMakeLists.txt b/examples/async/CMakeLists.txt index 83dd9a540..c14971284 100644 --- a/examples/async/CMakeLists.txt +++ b/examples/async/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/auth/CMakeLists.txt b/examples/auth/CMakeLists.txt index 75ffeac9a..2a4071484 100644 --- a/examples/auth/CMakeLists.txt +++ b/examples/auth/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/basic/CMakeLists.txt b/examples/basic/CMakeLists.txt index fe54ecce5..57123cee3 100644 --- a/examples/basic/CMakeLists.txt +++ b/examples/basic/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/batch/CMakeLists.txt b/examples/batch/CMakeLists.txt index 496b6a168..5da99ca24 100644 --- a/examples/batch/CMakeLists.txt +++ 
b/examples/batch/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/bind_by_name/CMakeLists.txt b/examples/bind_by_name/CMakeLists.txt index d89178243..de3ef6dbd 100644 --- a/examples/bind_by_name/CMakeLists.txt +++ b/examples/bind_by_name/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/callbacks/CMakeLists.txt b/examples/callbacks/CMakeLists.txt index 4c17e89fd..e91448ccc 100644 --- a/examples/callbacks/CMakeLists.txt +++ b/examples/callbacks/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/cloud/.gitignore b/examples/cloud/.gitignore new file mode 100644 index 000000000..c3de202fd --- /dev/null +++ b/examples/cloud/.gitignore @@ -0,0 +1 @@ +cloud diff --git a/examples/cloud/CMakeLists.txt b/examples/cloud/CMakeLists.txt new file mode 100644 index 000000000..731fdac05 --- /dev/null +++ b/examples/cloud/CMakeLists.txt @@ -0,0 +1,12 @@ +cmake_minimum_required(VERSION 2.6.4) + +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ".") +set(PROJECT_EXAMPLE_NAME cloud) + +file(GLOB EXAMPLE_SRC_FILES ${CASS_ROOT_DIR}/examples/cloud/*.c) +include_directories(${INCLUDES}) +add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) +target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) +add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) + +set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/cloud/cloud.c b/examples/cloud/cloud.c new file mode 100644 index 000000000..c8b4a7266 --- /dev/null +++ b/examples/cloud/cloud.c @@ -0,0 +1,109 @@ +/* + This is free and unencumbered software released into the public domain. + + Anyone is free to copy, modify, publish, use, compile, sell, or + distribute this software, either in source code form or as a compiled + binary, for any purpose, commercial or non-commercial, and by any + means. + + In jurisdictions that recognize copyright laws, the author or authors + of this software dedicate any and all copyright interest in the + software to the public domain. We make this dedication for the benefit + of the public at large and to the detriment of our heirs and + successors. We intend this dedication to be an overt act of + relinquishment in perpetuity of all present and future rights to this + software under copyright law. 
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+  IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+  OTHER DEALINGS IN THE SOFTWARE.
+
+  For more information, please refer to <http://unlicense.org/>
+*/
+
+#include <cassandra.h>
+#include <stdio.h>
+
+int main(int argc, char* argv[]) {
+  /* Setup and connect to cluster */
+  CassFuture* connect_future = NULL;
+  CassCluster* cluster;
+  CassSession* session;
+
+  const char* secure_connect_bundle;
+  const char* username;
+  const char* password;
+
+  if (argc < 4) {
+    fprintf(stderr, "Usage: %s <secure_connect_bundle> <username> <password>\n", argv[0]);
+    return 1;
+  }
+
+  secure_connect_bundle = argv[1];
+  username = argv[2];
+  password = argv[3];
+
+  cluster = cass_cluster_new();
+  session = cass_session_new();
+
+  /* Setup driver to connect to the cloud using the secure connection bundle */
+  if (cass_cluster_set_cloud_secure_connection_bundle(cluster, secure_connect_bundle) != CASS_OK) {
+    fprintf(stderr, "Unable to configure cloud using the secure connection bundle: %s\n",
+            secure_connect_bundle);
+  }
+
+  cass_cluster_set_credentials(cluster, username, password);
+
+  /* Provide the cluster object as configuration to connect the session */
+  connect_future = cass_session_connect(session, cluster);
+
+  if (cass_future_error_code(connect_future) == CASS_OK) {
+    /* Build statement and execute query */
+    const char* query = "SELECT release_version FROM system.local";
+    CassStatement* statement = cass_statement_new(query, 0);
+
+    CassFuture* result_future = cass_session_execute(session, statement);
+
+    if (cass_future_error_code(result_future) == CASS_OK) {
+      /* Retrieve result set and get the first row */
+      const CassResult* result = cass_future_get_result(result_future);
+      const CassRow* row = cass_result_first_row(result);
+
+      if (row) {
+        const CassValue* value = cass_row_get_column_by_name(row, "release_version");
+
+        const char* release_version;
+        size_t release_version_length;
+        cass_value_get_string(value, &release_version, &release_version_length);
+        printf("release_version: '%.*s'\n", (int)release_version_length, release_version);
+      }
+
+      cass_result_free(result);
+    } else {
+      /* Handle error */
+      const char* message;
+      size_t message_length;
+      cass_future_error_message(result_future, &message, &message_length);
+      fprintf(stderr, "Unable to run query: '%.*s'\n", (int)message_length, message);
+    }
+
+    cass_statement_free(statement);
+    cass_future_free(result_future);
+  } else {
+    /* Handle error */
+    const char* message;
+    size_t message_length;
+    cass_future_error_message(connect_future, &message, &message_length);
+    fprintf(stderr, "Unable to connect: '%.*s'\n", (int)message_length, message);
+  }
+
+  cass_future_free(connect_future);
+  cass_cluster_free(cluster);
+  cass_session_free(session);
+
+  return 0;
+}
diff --git a/examples/collections/CMakeLists.txt b/examples/collections/CMakeLists.txt
index c3f1c8e8a..c014b9dd3 100644
--- a/examples/collections/CMakeLists.txt
+++ b/examples/collections/CMakeLists.txt
@@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES})
 target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS})
 add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET})
-set_property(
-  TARGET ${PROJECT_EXAMPLE_NAME}
-  APPEND PROPERTY
COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/concurrent_executions/CMakeLists.txt b/examples/concurrent_executions/CMakeLists.txt index dd0afdebd..5f9a55326 100644 --- a/examples/concurrent_executions/CMakeLists.txt +++ b/examples/concurrent_executions/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/date_time/CMakeLists.txt b/examples/date_time/CMakeLists.txt index 10aca827d..37e27431d 100644 --- a/examples/date_time/CMakeLists.txt +++ b/examples/date_time/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/duration/CMakeLists.txt b/examples/duration/CMakeLists.txt index 238ddbf66..cb20e6d66 100644 --- a/examples/duration/CMakeLists.txt +++ b/examples/duration/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/execution_profiles/CMakeLists.txt b/examples/execution_profiles/CMakeLists.txt index e11becbee..bcf6010e1 100644 --- a/examples/execution_profiles/CMakeLists.txt +++ b/examples/execution_profiles/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/host_listener/CMakeLists.txt b/examples/host_listener/CMakeLists.txt index ab77f645f..ff8d0d64c 100644 --- a/examples/host_listener/CMakeLists.txt +++ b/examples/host_listener/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/logging/CMakeLists.txt b/examples/logging/CMakeLists.txt index ab7240f28..105618c4d 100644 --- a/examples/logging/CMakeLists.txt +++ b/examples/logging/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} 
${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/maps/CMakeLists.txt b/examples/maps/CMakeLists.txt index f6e2677b6..d2ae7f72c 100644 --- a/examples/maps/CMakeLists.txt +++ b/examples/maps/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/named_parameters/CMakeLists.txt b/examples/named_parameters/CMakeLists.txt index 02310f092..cc52df5e6 100644 --- a/examples/named_parameters/CMakeLists.txt +++ b/examples/named_parameters/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/paging/CMakeLists.txt b/examples/paging/CMakeLists.txt index 4f56b7b23..03830d6b0 100644 --- a/examples/paging/CMakeLists.txt +++ b/examples/paging/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/perf/CMakeLists.txt b/examples/perf/CMakeLists.txt index b7dc4fa03..b754dba4f 100644 --- a/examples/perf/CMakeLists.txt +++ b/examples/perf/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/prepared/CMakeLists.txt b/examples/prepared/CMakeLists.txt index b8154777b..882211f01 100644 --- a/examples/prepared/CMakeLists.txt +++ b/examples/prepared/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/schema_meta/CMakeLists.txt b/examples/schema_meta/CMakeLists.txt index 059c27ae6..2fac2964c 100644 --- a/examples/schema_meta/CMakeLists.txt +++ b/examples/schema_meta/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} 
${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/simple/CMakeLists.txt b/examples/simple/CMakeLists.txt index 8210ce63b..869730a97 100644 --- a/examples/simple/CMakeLists.txt +++ b/examples/simple/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/ssl/CMakeLists.txt b/examples/ssl/CMakeLists.txt index 5865ddecb..c08175135 100644 --- a/examples/ssl/CMakeLists.txt +++ b/examples/ssl/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/tracing/CMakeLists.txt b/examples/tracing/CMakeLists.txt index 5c9ca583b..0053c4f81 100644 --- a/examples/tracing/CMakeLists.txt +++ b/examples/tracing/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/tuple/CMakeLists.txt b/examples/tuple/CMakeLists.txt index d1645eecf..218f7f016 100644 --- a/examples/tuple/CMakeLists.txt +++ b/examples/tuple/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/udt/CMakeLists.txt b/examples/udt/CMakeLists.txt index 94f2ab8d9..7a3fb5046 100644 --- a/examples/udt/CMakeLists.txt +++ b/examples/udt/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} ${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/examples/uuids/CMakeLists.txt b/examples/uuids/CMakeLists.txt index 5659381e2..8302cedc8 100644 --- a/examples/uuids/CMakeLists.txt +++ b/examples/uuids/CMakeLists.txt @@ -9,7 +9,4 @@ add_executable(${PROJECT_EXAMPLE_NAME} 
${EXAMPLE_SRC_FILES}) target_link_libraries(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET} ${CASS_LIBS}) add_dependencies(${PROJECT_EXAMPLE_NAME} ${PROJECT_LIB_NAME_TARGET}) -set_property( - TARGET ${PROJECT_EXAMPLE_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_EXAMPLE_C_FLAGS}) set_property(TARGET ${PROJECT_EXAMPLE_NAME} PROPERTY FOLDER "Examples") diff --git a/gtests/CMakeLists.txt b/gtests/CMakeLists.txt index 87389271c..b70790d4f 100644 --- a/gtests/CMakeLists.txt +++ b/gtests/CMakeLists.txt @@ -98,7 +98,10 @@ if(CASS_BUILD_INTEGRATION_TESTS) ${PROJECT_LIB_NAME_TARGET}) set_property(TARGET ${INTEGRATION_TESTS_NAME} PROPERTY PROJECT_LABEL ${INTEGRATION_TESTS_DISPLAY_NAME}) set_property(TARGET ${INTEGRATION_TESTS_NAME} PROPERTY FOLDER "Tests") - set_property(TARGET ${INTEGRATION_TESTS_NAME} APPEND PROPERTY COMPILE_FLAGS ${TEST_CXX_FLAGS}) + # Enable bigobj for large object files during compilation (Cassandra types integration test) + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") + set_property(TARGET ${INTEGRATION_TESTS_NAME} APPEND PROPERTY COMPILE_FLAGS "/bigobj") + endif() if(LIBSSH2_LIBRARY_NAME) add_dependencies(${INTEGRATION_TESTS_NAME} ${LIBSSH2_LIBRARY_NAME}) endif() @@ -124,5 +127,10 @@ endif() # Unit test executable #------------------------------ if(CASS_BUILD_UNIT_TESTS) - GtestUnitTests("cassandra" "" "" "${CASS_EXCLUDED_UNIT_TEST_FILES}") + # Add the ability to create zip files inside the unit tests + set(MINIZIP_INCLUDE_DIR "${CASS_SRC_DIR}/third_party/minizip") + set(MINIZIP_SOURCE_FILES ${MINIZIP_INCLUDE_DIR}/ioapi.c + ${MINIZIP_INCLUDE_DIR}/zip.c) + + GtestUnitTests("cassandra" "${MINIZIP_SOURCE_FILES}" "${MINIZIP_INCLUDE_DIR}" "${CASS_EXCLUDED_UNIT_TEST_FILES}") endif() diff --git a/gtests/src/integration/driver_utils.cpp b/gtests/src/integration/driver_utils.cpp index b507d3e87..b4134a4c7 100644 --- a/gtests/src/integration/driver_utils.cpp +++ b/gtests/src/integration/driver_utils.cpp @@ -50,13 +50,13 @@ unsigned int test::driver::internals::Utils::connect_timeout(CassCluster* cluste std::string test::driver::internals::Utils::contact_points(CassCluster* cluster) { std::string contact_points; - const ContactPointList& contact_points_list = cluster->config().contact_points(); - for (ContactPointList::const_iterator it = contact_points_list.begin(); - it != contact_points_list.end(); ++it) { + const AddressVec& contact_points_list = cluster->config().contact_points(); + for (AddressVec::const_iterator it = contact_points_list.begin(); it != contact_points_list.end(); + ++it) { if (contact_points.size() > 0) { contact_points.push_back(','); } - contact_points.append((*it).c_str()); + contact_points.append((*it).hostname_or_address().c_str()); } return contact_points; } @@ -73,7 +73,17 @@ std::string test::driver::internals::Utils::host(CassFuture* future) { if (future) { Future* cass_future = static_cast<Future*>(future); if (cass_future->type() == Future::FUTURE_TYPE_RESPONSE) { - return static_cast<ResponseFuture*>(cass_future)->address().to_string().c_str(); + return static_cast<ResponseFuture*>(cass_future)->address().hostname_or_address().c_str(); + } + } + return ""; +} + +std::string test::driver::internals::Utils::server_name(CassFuture* future) { + if (future) { + Future* cass_future = static_cast<Future*>(future); + if (cass_future->type() == Future::FUTURE_TYPE_RESPONSE) { + return static_cast<ResponseFuture*>(cass_future)->address().server_name().c_str(); + } + } + return ""; diff --git a/gtests/src/integration/driver_utils.hpp b/gtests/src/integration/driver_utils.hpp index f86620ef4..d3808c4ff 100644 --- 
a/gtests/src/integration/driver_utils.hpp +++ b/gtests/src/integration/driver_utils.hpp @@ -73,6 +73,14 @@ class Utils { */ static std::string host(CassFuture* future); + /** + * Get the server name of the future + * + * @param future Future to retrieve server name from + * @return Server name + */ + static std::string server_name(CassFuture* future); + /** * Get the Murmur3 hash for a given value * diff --git a/gtests/src/integration/integration.cpp b/gtests/src/integration/integration.cpp index 3d08f52ab..b6cd6c2b6 100644 --- a/gtests/src/integration/integration.cpp +++ b/gtests/src/integration/integration.cpp @@ -52,9 +52,11 @@ Integration::Integration() , is_with_vnodes_(false) , is_randomized_contact_points_(false) , is_schema_metadata_(false) + , is_ccm_requested_(true) , is_ccm_start_requested_(true) , is_ccm_start_node_individually_(false) , is_session_requested_(true) + , is_keyspace_change_requested_(true) , is_test_chaotic_(false) , is_beta_protocol_(Options::is_beta_protocol()) , protocol_version_(CASS_HIGHEST_SUPPORTED_PROTOCOL_VERSION) @@ -63,7 +65,7 @@ Integration::Integration() // Determine if the schema keyspaces table should be updated // TODO: Make cass_version (and dse_version) available for all tests CCM::CassVersion cass_version = server_version_; - if (Options::is_dse()) { + if (!Options::is_cassandra()) { cass_version = static_cast(cass_version).get_cass_version(); } if (cass_version >= "3.0.0") { @@ -138,47 +140,49 @@ void Integration::SetUp() { data_center_nodes.push_back(number_dc1_nodes_); data_center_nodes.push_back(number_dc2_nodes_); - try { - // Create and start the CCM cluster (if not already created) - ccm_ = new CCM::Bridge( - server_version_, Options::use_git(), Options::branch_tag(), Options::use_install_dir(), - Options::install_dir(), Options::is_dse(), dse_workload_, Options::cluster_prefix(), - Options::dse_credentials(), Options::dse_username(), Options::dse_password(), - Options::deployment_type(), Options::authentication_type(), Options::host(), - Options::port(), Options::username(), Options::password(), Options::public_key(), - Options::private_key(), Options::is_verbose_ccm()); - if (ccm_->create_cluster(data_center_nodes, is_with_vnodes_, is_password_authenticator_, - is_ssl_, is_client_authentication_)) { - if (is_ccm_start_requested_) { - if (is_ccm_start_node_individually_) { - for (unsigned short node = 1; node <= (number_dc1_nodes_ + number_dc2_nodes_); ++node) { + if (is_ccm_requested_) { + try { + // Create and start the CCM cluster (if not already created) + ccm_ = new CCM::Bridge( + server_version_, Options::use_git(), Options::branch_tag(), Options::use_install_dir(), + Options::install_dir(), Options::server_type(), dse_workload_, Options::cluster_prefix(), + Options::dse_credentials(), Options::dse_username(), Options::dse_password(), + Options::deployment_type(), Options::authentication_type(), Options::host(), + Options::port(), Options::username(), Options::password(), Options::public_key(), + Options::private_key(), Options::is_verbose_ccm()); + if (ccm_->create_cluster(data_center_nodes, is_with_vnodes_, is_password_authenticator_, + is_ssl_, is_client_authentication_)) { + if (is_ccm_start_requested_) { + if (is_ccm_start_node_individually_) { + for (unsigned short node = 1; node <= (number_dc1_nodes_ + number_dc2_nodes_); ++node) { + if (is_password_authenticator_) { + ccm_->start_node(node, "-Dcassandra.superuser_setup_delay_ms=0"); + } else { + ccm_->start_node(node); + } + } + } else { if (is_password_authenticator_) { - 
ccm_->start_node(node, "-Dcassandra.superuser_setup_delay_ms=0"); + ccm_->start_cluster("-Dcassandra.superuser_setup_delay_ms=0"); } else { - ccm_->start_node(node); + ccm_->start_cluster(); } } - } else { - if (is_password_authenticator_) { - ccm_->start_cluster("-Dcassandra.superuser_setup_delay_ms=0"); - } else { - ccm_->start_cluster(); - } } } - } - // Generate the default contact points - contact_points_ = - generate_contact_points(ccm_->get_ip_prefix(), number_dc1_nodes_ + number_dc2_nodes_); + // Generate the default contact points + contact_points_ = + generate_contact_points(ccm_->get_ip_prefix(), number_dc1_nodes_ + number_dc2_nodes_); - // Determine if the session connection should be established - if (is_session_requested_ && is_ccm_start_requested_) { - connect(); + // Determine if the session connection should be established + if (is_session_requested_ && is_ccm_start_requested_) { + connect(); + } + } catch (CCM::BridgeException be) { + // Issue creating the CCM bridge instance (force failure) + FAIL() << be.what(); } - } catch (CCM::BridgeException be) { - // Issue creating the CCM bridge instance (force failure) - FAIL() << be.what(); } } @@ -206,7 +210,9 @@ void Integration::TearDown() { // Determine if the CCM cluster should be destroyed if (is_test_chaotic_) { // Destroy the current cluster and reset the chaos flag for the next test - ccm_->remove_cluster(); + if (!Options::keep_clusters()) { + ccm_->remove_cluster(); + } is_test_chaotic_ = false; } } @@ -295,6 +301,16 @@ void Integration::drop_type(const std::string& type_name) { session_.execute(drop_type_query.str(), CASS_CONSISTENCY_ANY, false, false); } +bool Integration::use_keyspace(const std::string& keyspace_name) { + std::stringstream use_keyspace_query; + use_keyspace_query << "USE " << keyspace_name; + session_.execute(use_keyspace_query.str()); + if (this->HasFailure()) { + return false; + } + return true; +} + void Integration::connect(Cluster cluster) { // Establish the session connection cluster_ = cluster; @@ -303,6 +319,10 @@ void Integration::connect(Cluster cluster) { // Update the server version if branch_tag was specified if (Options::use_git() && !Options::branch_tag().empty()) { + if (Options::is_ddac()) { + FAIL() << "Unable to build DDAC from Branch/Tag"; + return; + } if (Options::is_dse()) { server_version_ = ccm_->get_dse_version(); } else { @@ -317,9 +337,9 @@ void Integration::connect(Cluster cluster) { CHECK_FAILURE; // Update the session to use the new keyspace by default - std::stringstream use_keyspace_query; - use_keyspace_query << "USE " << keyspace_name_; - session_.execute(use_keyspace_query.str()); + if (is_keyspace_change_requested_) { + use_keyspace(keyspace_name_); + } } void Integration::connect() { diff --git a/gtests/src/integration/integration.hpp b/gtests/src/integration/integration.hpp index c7d7a9734..a4f695d33 100644 --- a/gtests/src/integration/integration.hpp +++ b/gtests/src/integration/integration.hpp @@ -37,11 +37,13 @@ // Macros for grouping tests together #define GROUP_TEST_F(group_name, test_case, test_name) TEST_F(test_case, group_name##_##test_name) +#define GROUP_TEST(group_name, test_case, test_name) TEST(test_case, group_name##_##test_name) #define GROUP_TYPED_TEST_P(group_name, test_case, test_name) \ TYPED_TEST_P(test_case, group_name##_##test_name) // Macros to use for grouping integration tests together -#define GROUP_INTEGRATION_TEST(server_type) GROUP_CONCAT(Integration, server_type) +#define INTEGRATION_TEST(server_type, test_case, test_name) \ + 
GROUP_TEST(Integration##_##server_type, test_case, test_name) #define INTEGRATION_TEST_F(server_type, test_case, test_name) \ GROUP_TEST_F(Integration##_##server_type, test_case, test_name) #define INTEGRATION_TYPED_TEST_P(server_type, test_case, test_name) \ @@ -52,7 +54,8 @@ GROUP_TYPED_TEST_P(DISABLED##_##Integration##_##server_type, test_case, est_name) // Macros to use for grouping Cassandra integration tests together -#define CASSANDRA_TEST_NAME(test_name) Integration##_##Cassandra##_##test_name +#define CASSANDRA_INTEGRATION_TEST(test_case, test_name) \ + INTEGRATION_TEST(Cassandra, test_case, test_name) #define CASSANDRA_INTEGRATION_TEST_F(test_case, test_name) \ INTEGRATION_TEST_F(Cassandra, test_case, test_name) #define CASSANDRA_INTEGRATION_TYPED_TEST_P(test_case, test_name) \ @@ -83,7 +86,7 @@ #define CHECK_VERSION(version) \ do { \ CCM::CassVersion cass_version = this->server_version_; \ - if (Options::is_dse()) { \ + if (!Options::is_cassandra()) { \ cass_version = static_cast(cass_version).get_cass_version(); \ } \ if (cass_version < #version) { \ @@ -98,7 +101,7 @@ #define CHECK_VALUE_TYPE_VERSION(type) \ CCM::CassVersion cass_version = this->server_version_; \ - if (Options::is_dse()) { \ + if (!Options::is_cassandra()) { \ cass_version = static_cast(cass_version).get_cass_version(); \ } \ if (cass_version < type::supported_server_version()) { \ @@ -107,8 +110,12 @@ #define CHECK_CONTINUE(flag, message) ASSERT_TRUE(flag) << message; -#define CASSANDRA_KEY_VALUE_TABLE_FORMAT "CREATE TABLE %s (key %s PRIMARY KEY, value %s)" +#define CASSANDRA_KEY_VALUE_TABLE_FORMAT \ + "CREATE TABLE IF NOT EXISTS %s (key %s PRIMARY KEY, value %s)" +#define CASSANDRA_KEY_VALUE_QUALIFIED_TABLE_FORMAT \ + "CREATE TABLE IF NOT EXISTS %s.%s (key %s PRIMARY KEY, value %s)" #define CASSANDRA_KEY_VALUE_INSERT_FORMAT "INSERT INTO %s (key, value) VALUES(%s, %s)" +#define CASSANDRA_KEY_VALUE_QUALIFIED_INSERT_FORMAT "INSERT INTO %s.%s (key, value) VALUES(%s, %s)" #define CASSANDRA_SELECT_VALUE_FORMAT "SELECT value FROM %s WHERE key=%s" #define CASSANDRA_DELETE_ROW_FORMAT "DELETE FROM %s WHERE key=%s" #define CASSANDRA_UPDATE_VALUE_FORMAT "UPDATE %s SET value=%s WHERE key=%s" @@ -252,6 +259,12 @@ class Integration : public testing::Test { * (DEFAULT: false) */ bool is_schema_metadata_; + /** + * Setting to determine if CCM instance should be created. True if CCM instance + * should be created; false otherwise. + * (DEFAULT: true) + */ + bool is_ccm_requested_; /** * Setting to determine if CCM cluster should be started. True if CCM cluster * should be started; false otherwise. @@ -272,6 +285,11 @@ class Integration : public testing::Test { * (DEFAULT: true) */ bool is_session_requested_; + /** + * Flag to indicate if the newly created keyspace should be set for the session connection. + * (DEFAULT: true) + */ + bool is_keyspace_change_requested_; /** * Flag to indicate if a test is chaotic and should have its CCM cluster * destroyed @@ -369,6 +387,14 @@ class Integration : public testing::Test { */ virtual void drop_type(const std::string& type_name); + /** + * Update the current keyspace used by the session + * + * @param keyspace_name Keyspace to use + * @return True if keyspace was changed; false otherwise + */ + virtual bool use_keyspace(const std::string& keyspace_name); + /** * Establish the session connection using provided cluster object. 
* diff --git a/gtests/src/integration/main.cpp b/gtests/src/integration/main.cpp index c9b2bcdd4..6efbdfe50 100644 --- a/gtests/src/integration/main.cpp +++ b/gtests/src/integration/main.cpp @@ -18,6 +18,7 @@ #include "bridge.hpp" #include "options.hpp" +#include "ssl.hpp" #include "win_debug.hpp" #include "cassandra.h" @@ -25,6 +26,8 @@ #include +using datastax::internal::core::SslContextFactory; + /** * Bootstrap listener for handling start and end of the integration tests. */ @@ -65,6 +68,10 @@ class BootstrapListener : public testing::EmptyTestEventListener { } } + void OnTestStart(const testing::TestInfo& test_information) { SslContextFactory::init(); } + + void OnTestEnd(const testing::TestInfo& test_information) { SslContextFactory::cleanup(); } + private: /** * Current category @@ -106,11 +113,7 @@ std::string generate_filter(TestCategory category, const std::string& base_filte int main(int argc, char* argv[]) { // Initialize the Google testing framework testing::InitGoogleTest(&argc, argv); - - // Add a bootstrap mechanism for program start and finish - BootstrapListener* listener = NULL; testing::TestEventListeners& listeners = testing::UnitTest::GetInstance()->listeners(); - listeners.Append(listener = new BootstrapListener()); #if defined(_WIN32) && defined(_DEBUG) // Add the memory leak checking to the listener callbacks @@ -121,6 +124,10 @@ int main(int argc, char* argv[]) { #endif #endif + // Add a bootstrap mechanism for program start and finish + BootstrapListener* listener = NULL; + listeners.Append(listener = new BootstrapListener()); + // Initialize the options for the integration test if (Options::initialize(argc, argv)) { // Run the integration tests from each applicable category diff --git a/gtests/src/integration/objects/future.hpp b/gtests/src/integration/objects/future.hpp index b9e87c4a7..8df887f86 100644 --- a/gtests/src/integration/objects/future.hpp +++ b/gtests/src/integration/objects/future.hpp @@ -97,6 +97,13 @@ class Future : public Object { */ const std::string host() { return internals::Utils::host(get()); } + /** + * Get the server name of the future + * + * @return Server name + */ + const std::string server_name() { return internals::Utils::server_name(get()); } + /** * Get the result from the future * diff --git a/gtests/src/integration/objects/result.hpp b/gtests/src/integration/objects/result.hpp index da70ad06d..0ac69a649 100644 --- a/gtests/src/integration/objects/result.hpp +++ b/gtests/src/integration/objects/result.hpp @@ -113,6 +113,13 @@ class Result : public Object { */ const std::string host() { return future_.host(); } + /** + * Get the server name of the future + * + * @return Server name + */ + const std::string server_name() { return future_.server_name(); } + /** * Get the number of columns from the result * diff --git a/gtests/src/integration/options.cpp b/gtests/src/integration/options.cpp index fc371c7da..ab3ca1a55 100644 --- a/gtests/src/integration/options.cpp +++ b/gtests/src/integration/options.cpp @@ -24,7 +24,8 @@ #include #define DEFAULT_OPTIONS_CASSSANDRA_VERSION CCM::CassVersion("3.11.4") -#define DEFAULT_OPTIONS_DSE_VERSION CCM::DseVersion("6.0.8") +#define DEFAULT_OPTIONS_DSE_VERSION CCM::DseVersion("6.7.5") +#define DEFAULT_OPTIONS_DDAC_VERSION CCM::DseVersion("5.1.17") // Initialize the defaults for all the options bool Options::is_initialized_ = false; @@ -32,7 +33,6 @@ bool Options::is_help_ = false; bool Options::is_keep_clusters_ = false; bool Options::is_log_tests_ = true; CCM::CassVersion Options::server_version_ 
= DEFAULT_OPTIONS_CASSSANDRA_VERSION; -bool Options::is_dse_ = false; bool Options::use_git_ = false; std::string Options::branch_tag_; bool Options::use_install_dir_ = false; @@ -55,6 +55,7 @@ CCM::DseCredentialsType Options::dse_credentials_type_; CCM::AuthenticationType Options::authentication_type_; CCM::DeploymentType Options::deployment_type_; std::set Options::categories_; +CCM::ServerType Options::server_type_; bool Options::initialize(int argc, char* argv[]) { // Only allow initialization to occur once @@ -63,6 +64,7 @@ bool Options::initialize(int argc, char* argv[]) { dse_credentials_type_ = CCM::DseCredentialsType::USERNAME_PASSWORD; authentication_type_ = CCM::AuthenticationType::USERNAME_PASSWORD; deployment_type_ = CCM::DeploymentType::LOCAL; + server_type_ = CCM::ServerType::CASSANDRA; // Check for the help argument first (keeps defaults for help display) for (int i = 1; i < argc; ++i) { @@ -77,6 +79,8 @@ bool Options::initialize(int argc, char* argv[]) { for (int i = 1; i < argc; ++i) { if (std::string(argv[i]) == "--dse") { server_version_ = DEFAULT_OPTIONS_DSE_VERSION; + } else if (std::string(argv[i]) == "--ddac") { + server_version_ = DEFAULT_OPTIONS_DDAC_VERSION; } } @@ -107,7 +111,9 @@ bool Options::initialize(int argc, char* argv[]) { << std::endl; } } else if (key == "--dse") { - is_dse_ = true; + server_type_ = CCM::ServerType::DSE; + } else if (key == "--ddac") { + server_type_ = CCM::ServerType::DDAC; } else if (key == "--dse-username") { if (!value.empty()) { dse_username_ = value; @@ -129,7 +135,7 @@ bool Options::initialize(int argc, char* argv[]) { } } if (!is_found) { - std::cerr << "Invalid DSE Credentials Type: Using default " + std::cerr << "Invalid DSE/DDAC Credentials Type: Using default " << dse_credentials_type_.to_string() << std::endl; } } else if (key == "--git") { @@ -272,7 +278,7 @@ bool Options::initialize(int argc, char* argv[]) { for (TestCategory::iterator iterator = TestCategory::begin(); iterator != TestCategory::end(); ++iterator) { // Only add the DSE test category if DSE is enabled - if (*iterator != TestCategory::DSE || is_dse_) { + if (*iterator != TestCategory::DSE || is_dse()) { categories_.insert(*iterator); } else { std::cerr << "DSE Category Will be Ignored: DSE is not enabled [--dse]" << std::endl; @@ -282,11 +288,11 @@ bool Options::initialize(int argc, char* argv[]) { if (deployment_type_ == CCM::DeploymentType::LOCAL) { host_ = "127.0.0.1"; } - if (is_dse_ && !use_install_dir_) { - // Determine if the DSE credentials type should be updated + if (!is_cassandra() && !use_install_dir_) { + // Determine if the DSE/DDAC credentials type should be updated if (dse_credentials_type_ == CCM::DseCredentialsType::USERNAME_PASSWORD) { if (dse_username_.empty() || dse_password_.empty()) { - std::cerr << "Invalid Username and/or Password: Default to INI_FILE DSE credentials" + std::cerr << "Invalid Username and/or Password: Default to INI_FILE DSE/DDAC credentials" << std::endl; dse_credentials_type_ = CCM::DseCredentialsType::INI_FILE; } @@ -309,10 +315,11 @@ void Options::print_help() { std::cout << std::endl << "CCM Options:" << std::endl; std::cout << " --version=[VERSION]" << std::endl << " " - << "Cassandra/DSE version to use." << std::endl + << "Cassandra/DSE/DDAC version to use." 
<< std::endl << " Default:" << std::endl << " Cassandra Version: " << server_version().to_string() << std::endl - << " DSE Version: " << DEFAULT_OPTIONS_DSE_VERSION.to_string() << std::endl; + << " DSE Version: " << DEFAULT_OPTIONS_DSE_VERSION.to_string() << std::endl + << " DDAC Version: " << DEFAULT_OPTIONS_DDAC_VERSION.to_string() << std::endl; std::string categories; for (TestCategory::iterator iterator = TestCategory::begin(); iterator != TestCategory::end(); ++iterator) { @@ -330,16 +337,20 @@ void Options::print_help() { std::cout << " --dse" << std::endl << " " << "Indicate server version supplied is DSE." << std::endl; + std::cout << " --ddac" << std::endl + << " " + << "Indicate server version supplied is DDAC." << std::endl; std::cout << " --dse-credentials=(USERNAME_PASSWORD|INI_FILE)" << std::endl << " " - << "DSE credentials to use for download authentication. The default is " << std::endl + << "DSE/DDAC credentials to use for download authentication. The default is " + << std::endl << " " << dse_credentials().to_string() << "." << std::endl; std::cout << " --dse-username=[USERNAME]" << std::endl << " " - << "Username to use for DSE download authentication." << std::endl; + << "Username to use for DSE/DDAC download authentication." << std::endl; std::cout << " --dse-password=[PASSWORD]" << std::endl << " " - << "Password to use for DSE download authentication." << std::endl; + << "Password to use for DSE/DDAC download authentication." << std::endl; std::cout << " --git" << std::endl << " " << "Indicate Cassandra/DSE server download should be obtained from" << std::endl @@ -408,18 +419,20 @@ void Options::print_settings() { if (log_tests()) { std::cout << " Logging driver messages" << std::endl; } - if (is_dse()) { - std::cout << " DSE Version: " << CCM::DseVersion(server_version()).to_string() << std::endl; + if (!is_cassandra()) { + std::cout << " " << server_type_.to_string() + << " Version: " << CCM::DseVersion(server_version()).to_string() << std::endl; if (!use_install_dir()) { if (dse_credentials() == CCM::DseCredentialsType::USERNAME_PASSWORD) { std::cout << " Username: " << dse_username() << std::endl; std::cout << " Password: " << dse_password() << std::endl; } else { - std::cout << " Using INI file for DSE download authentication" << std::endl; + std::cout << " Using INI file for DSE/DDAC download authentication" << std::endl; } } } else { - std::cout << " Cassandra Version: " << server_version().to_string() << std::endl; + std::cout << " " << server_type_.to_string() << " Version: " << server_version().to_string() + << std::endl; } if (use_install_dir()) { std::cout << " Using installation directory [" << install_dir() << "]" << std::endl; @@ -456,7 +469,13 @@ bool Options::log_tests() { return is_log_tests_; } CCM::CassVersion Options::server_version() { return server_version_; } -bool Options::is_dse() { return is_dse_; } +CCM::ServerType Options::server_type() { return server_type_; } + +bool Options::is_cassandra() { return server_type_ == CCM::ServerType::CASSANDRA; } + +bool Options::is_dse() { return server_type_ == CCM::ServerType::DSE; } + +bool Options::is_ddac() { return server_type_ == CCM::ServerType::DDAC; } CCM::DseCredentialsType Options::dse_credentials() { // Static initialization cannot be guaranteed @@ -514,7 +533,7 @@ const std::string& Options::private_key() { return private_key_; } SharedPtr > Options::ccm() { return new CCM::Bridge(Options::server_version(), Options::use_git(), Options::branch_tag(), - Options::use_install_dir(), 
Options::install_dir(), Options::is_dse(), + Options::use_install_dir(), Options::install_dir(), Options::server_type(), CCM::Bridge::DEFAULT_DSE_WORKLOAD, Options::cluster_prefix(), Options::dse_credentials(), Options::dse_username(), Options::dse_password(), Options::deployment_type(), diff --git a/gtests/src/integration/options.hpp b/gtests/src/integration/options.hpp index fa1600b35..95f93e6d0 100644 --- a/gtests/src/integration/options.hpp +++ b/gtests/src/integration/options.hpp @@ -67,17 +67,35 @@ class Options { */ static bool log_tests(); /** - * Get the server version (Cassandra/DSE) to use + * Get the server version (Cassandra/DSE/DDAC) to use * - * @return Cassandra/DSE version to use + * @return Cassandra/DSE/DDAC version to use */ static CCM::CassVersion server_version(); + /** + * Get the server type (Cassandra/DSE/DDAC) + * + * @return Server type + */ + static CCM::ServerType server_type(); + /** + * Flag to determine if Cassandra should be used or not + * + * @return True if Cassandra should be used; false otherwise + */ + static bool is_cassandra(); /** * Flag to determine if DSE should be used or not * * @return True if DSE should be used; false otherwise */ static bool is_dse(); + /** + * Flag to determine if DDAC should be used or not + * + * @return True if DDAC should be used; false otherwise + */ + static bool is_ddac(); /** * Get the DSE credentials type (username|password/INI file) * @@ -240,13 +258,13 @@ class Options { */ static bool is_log_tests_; /** - * Server version to use (Cassandra/DSE) + * Server version to use (Cassandra/DSE/DDAC) */ static CCM::CassVersion server_version_; /** - * Flag to indicate if DSE should be used instead of Cassandra + * Server type to use */ - static bool is_dse_; + static CCM::ServerType server_type_; /** * Flag to determine if Cassandra should be built from ASF git (github if DSE) */ diff --git a/gtests/src/integration/rest_client.cpp b/gtests/src/integration/rest_client.cpp index 2e36de389..7e931997b 100644 --- a/gtests/src/integration/rest_client.cpp +++ b/gtests/src/integration/rest_client.cpp @@ -80,7 +80,8 @@ const Response RestClient::send_request(const Request& request) { // Start the request and attach the HTTP request to send to the REST server uv_connect_t connect; connect.data = &http_request; - uv_tcp_connect(&connect, &tcp, address.addr(), handle_connected); + Address::SocketStorage storage; + uv_tcp_connect(&connect, &tcp, address.to_sockaddr(&storage), handle_connected); uv_run(&loop, UV_RUN_DEFAULT); uv_loop_close(&loop); diff --git a/gtests/src/integration/simulacron/simulacron_cluster.cpp b/gtests/src/integration/simulacron/simulacron_cluster.cpp index 31632dc52..e9f52cb97 100644 --- a/gtests/src/integration/simulacron/simulacron_cluster.cpp +++ b/gtests/src/integration/simulacron/simulacron_cluster.cpp @@ -66,7 +66,7 @@ test::SimulacronCluster::SimulacronCluster() // Determine the release version (for priming nodes) CCM::CassVersion cassandra_version = Options::server_version(); - if (Options::is_dse()) { + if (!Options::is_cassandra()) { CCM::DseVersion dse_version(cassandra_version); cassandra_version = dse_version.get_cass_version(); if (cassandra_version == "0.0.0") { @@ -127,7 +127,7 @@ void test::SimulacronCluster::create_cluster( } // Add the DSE version (if applicable) - if (Options::is_dse()) { + if (!Options::is_cassandra()) { paramters << "&dse_version=" << dse_version_; cluster_name << dse_version_; } else { diff --git a/gtests/src/integration/simulacron/simulacron_integration.hpp 
b/gtests/src/integration/simulacron/simulacron_integration.hpp index 03b6c69ba..8b03a7419 100644 --- a/gtests/src/integration/simulacron/simulacron_integration.hpp +++ b/gtests/src/integration/simulacron/simulacron_integration.hpp @@ -22,11 +22,12 @@ #include // Macros to use for grouping Simulacron integration tests together -#define SIMULACRON_TEST_NAME(test_name) Integration##_##simulacron##_##test_name +#define SIMULACRON_INTEGRATION_TEST(test_case, test_name) \ + INTEGRATION_TEST(Simulacron, test_case, test_name) #define SIMULACRON_INTEGRATION_TEST_F(test_case, test_name) \ - INTEGRATION_TEST_F(simulacron, test_case, test_name) + INTEGRATION_TEST_F(Simulacron, test_case, test_name) #define SIMULACRON_INTEGRATION_TYPED_TEST_P(test_case, test_name) \ - INTEGRATION_TYPED_TEST_P(simulacron, test_case, test_name) + INTEGRATION_TYPED_TEST_P(Simulacron, test_case, test_name) #define CHECK_SIMULACRON_AVAILABLE \ if (!sc_) { \ diff --git a/gtests/src/integration/test_category.cpp b/gtests/src/integration/test_category.cpp index 50990b1a0..9c22031b4 100644 --- a/gtests/src/integration/test_category.cpp +++ b/gtests/src/integration/test_category.cpp @@ -23,7 +23,7 @@ const TestCategory TestCategory::CASSANDRA("CASSANDRA", 0, "Cassandra", "*_Cassandra_*"); const TestCategory TestCategory::DSE("DSE", 1, "DataStax Enterprise", "*_DSE_*"); const TestCategory TestCategory::SIMULACRON("SIMULACRON", SHRT_MAX, "Simulated DSE (and Cassandra)", - "*_simulacron_*"); + "*_Simulacron_*"); // Static declarations for test type std::set TestCategory::constants_; diff --git a/gtests/src/integration/test_utils.cpp b/gtests/src/integration/test_utils.cpp index b0ba3579d..26bff54fa 100644 --- a/gtests/src/integration/test_utils.cpp +++ b/gtests/src/integration/test_utils.cpp @@ -148,9 +148,9 @@ std::vector test::Utils::explode(const std::string& input, bool test::Utils::file_exists(const std::string& filename) { uv_fs_t request; - int error_code = uv_fs_open(NULL, &request, filename.c_str(), O_RDONLY, 0, NULL); + int error_code = uv_fs_stat(NULL, &request, filename.c_str(), NULL); uv_fs_req_cleanup(&request); - return error_code != UV_ENOENT; + return error_code == 0; } std::string test::Utils::indent(const std::string& input, unsigned int indent) { @@ -274,3 +274,10 @@ bool test::Utils::wait_for_port(const std::string& ip_address, unsigned short po // Unable to establish connection to node on port return false; } + +std::string test::Utils::home_directory() { + char home[FILE_PATH_SIZE] = { 0 }; + size_t home_length = sizeof(home); + uv_os_homedir(home, &home_length); + return std::string(home, home_length); +} diff --git a/gtests/src/integration/test_utils.hpp b/gtests/src/integration/test_utils.hpp index fdf28e80e..fa5641423 100644 --- a/gtests/src/integration/test_utils.hpp +++ b/gtests/src/integration/test_utils.hpp @@ -185,6 +185,13 @@ class Utils { static bool wait_for_port(const std::string& ip_address, unsigned short port, unsigned int number_of_retries = 100, unsigned int retry_delay_ms = 100); + + /** + * Get the home directory for the current user (not thread safe) + * + * @return Home directory + */ + static std::string home_directory(); }; } // namespace test diff --git a/gtests/src/integration/tests/test_auth.cpp b/gtests/src/integration/tests/test_auth.cpp index f37b6d3ff..1671af4f8 100644 --- a/gtests/src/integration/tests/test_auth.cpp +++ b/gtests/src/integration/tests/test_auth.cpp @@ -200,7 +200,7 @@ CASSANDRA_INTEGRATION_TEST_F(AuthenticationTests, BadCredentials) { // Add the proper logging 
criteria (based on server version) CCM::CassVersion cass_version = this->server_version_; - if (Options::is_dse()) { + if (!Options::is_cassandra()) { cass_version = static_cast(cass_version).get_cass_version(); } if (cass_version >= "3.10") { @@ -242,7 +242,7 @@ CASSANDRA_INTEGRATION_TEST_F(AuthenticationTests, AuthenticatorSetErrorNull) { // Add the proper logging criteria (based on server version) CCM::CassVersion cass_version = this->server_version_; - if (Options::is_dse()) { + if (!Options::is_cassandra()) { cass_version = static_cast(cass_version).get_cass_version(); } if (cass_version >= "3.10") { diff --git a/gtests/src/integration/tests/test_basics.cpp b/gtests/src/integration/tests/test_basics.cpp index 6145b4393..c8cf42d0d 100644 --- a/gtests/src/integration/tests/test_basics.cpp +++ b/gtests/src/integration/tests/test_basics.cpp @@ -334,7 +334,7 @@ CASSANDRA_INTEGRATION_TEST_F(BasicsTests, NoCompactEnabledConnection) { CHECK_VERSION(3.0.16); CHECK_VERSION(3.11.2); CCM::CassVersion cass_version = server_version_; - if (Options::is_dse()) { + if (!Options::is_cassandra()) { if (server_version_ >= "6.0.0") { SKIP_TEST("Unsupported for DataStax Enterprise Version " << server_version_.to_string() diff --git a/gtests/src/integration/tests/test_cluster.cpp b/gtests/src/integration/tests/test_cluster.cpp index 509d024bd..16aed5614 100644 --- a/gtests/src/integration/tests/test_cluster.cpp +++ b/gtests/src/integration/tests/test_cluster.cpp @@ -14,7 +14,12 @@ limitations under the License. */ -#include "objects/cluster.hpp" +#include "integration.hpp" + +class ClusterTests : public Integration { +public: + ClusterTests() { is_ccm_requested_ = false; } +}; /** * Set local dc to null for dc-aware lbp @@ -23,7 +28,7 @@ * @test_category configuration * @expected_result Error out because it is illegal to specify a null local-dc. */ -TEST(ClusterTest, SetLoadBalanceDcAwareNullLocalDc) { +CASSANDRA_INTEGRATION_TEST_F(ClusterTests, SetLoadBalanceDcAwareNullLocalDc) { test::driver::Cluster cluster; EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, cass_cluster_set_load_balance_dc_aware(cluster.get(), NULL, 99, cass_false)); @@ -36,7 +41,7 @@ TEST(ClusterTest, SetLoadBalanceDcAwareNullLocalDc) { * @test_category configuration * @expected_result CASS_ERROR_LIB_BAD_PARAMS. */ -TEST(ClusterTest, ExponentialReconnectionPolicyBadParameters) { +CASSANDRA_INTEGRATION_TEST_F(ClusterTests, ExponentialReconnectionPolicyBadParameters) { test::driver::Cluster cluster; // Base delay must be greater than 1 @@ -46,3 +51,17 @@ TEST(ClusterTest, ExponentialReconnectionPolicyBadParameters) { // Base delay cannot be greater than max delay EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, cass_cluster_set_exponential_reconnect(cluster.get(), 3, 2)); } + +/** + * Set invalid parameters for secure connect bundle. + * + * @jira_ticket CPP-790 + * @test_category configuration + * @expected_result CASS_ERROR_LIB_BAD_PARAMS. + */ +CASSANDRA_INTEGRATION_TEST_F(ClusterTests, SecureConnectionBundleBadParameters) { + test::driver::Cluster cluster; + + EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, cass_cluster_set_cloud_secure_connection_bundle_n( + cluster.get(), "invalid_filename", 16)); +} diff --git a/gtests/src/integration/tests/test_config.cpp b/gtests/src/integration/tests/test_config.cpp index 5df00665e..b981c6ca6 100644 --- a/gtests/src/integration/tests/test_config.cpp +++ b/gtests/src/integration/tests/test_config.cpp @@ -14,21 +14,21 @@ limitations under the License. 
*/ -#include +#include "integration.hpp" -#include "cassandra.h" +class ConfigTests : public Integration { +public: + ConfigTests() { Integration::SetUp(); } +}; -#include "driver_utils.hpp" -#include "objects/cluster.hpp" - -TEST(ConfigTest, Options) { +CASSANDRA_INTEGRATION_TEST_F(ConfigTests, Options) { test::driver::Cluster cluster = test::driver::Cluster::build().with_connect_timeout(9999u).with_port(7000); EXPECT_EQ(9999u, test::driver::internals::Utils::connect_timeout(cluster.get())); EXPECT_EQ(7000, test::driver::internals::Utils::port(cluster.get())); } -TEST(ConfigTest, ContactPointsSimple) { +CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsSimple) { std::string contact_points = "127.0.0.1,127.0.0.2,127.0.0.3"; test::driver::Cluster cluster = test::driver::Cluster::build().with_contact_points(contact_points); @@ -36,7 +36,7 @@ TEST(ConfigTest, ContactPointsSimple) { test::driver::internals::Utils::contact_points(cluster.get()).c_str()); } -TEST(ConfigTest, ContactPointsClear) { +CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsClear) { std::string contact_points = "127.0.0.1,127.0.0.2,127.0.0.3"; test::driver::Cluster cluster = test::driver::Cluster::build().with_contact_points(contact_points); @@ -46,7 +46,7 @@ TEST(ConfigTest, ContactPointsClear) { EXPECT_TRUE(test::driver::internals::Utils::contact_points(cluster.get()).empty()); } -TEST(ConfigTest, ContactPointsExtraCommas) { +CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsExtraCommas) { std::string contact_points = ",,,,127.0.0.1,,,,127.0.0.2,127.0.0.3,,,,"; test::driver::Cluster cluster = test::driver::Cluster::build().with_contact_points(contact_points); @@ -54,7 +54,7 @@ TEST(ConfigTest, ContactPointsExtraCommas) { test::driver::internals::Utils::contact_points(cluster.get()).c_str()); } -TEST(ConfigTest, ContactPointsExtraWhitespace) { +CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsExtraWhitespace) { std::string contact_points = " ,\r\n, , , 127.0.0.1 ,,, ,\t127.0.0.2,127.0.0.3, \t\n, ,, "; test::driver::Cluster cluster = @@ -63,7 +63,7 @@ TEST(ConfigTest, ContactPointsExtraWhitespace) { test::driver::internals::Utils::contact_points(cluster.get()).c_str()); } -TEST(ConfigTest, ContactPointsAppend) { +CASSANDRA_INTEGRATION_TEST_F(ConfigTests, ContactPointsAppend) { test::driver::Cluster cluster = test::driver::Cluster::build().with_contact_points("127.0.0.1"); EXPECT_STREQ("127.0.0.1", test::driver::internals::Utils::contact_points(cluster.get()).c_str()); cluster.with_contact_points("127.0.0.2"); diff --git a/gtests/src/integration/tests/test_control_connection.cpp b/gtests/src/integration/tests/test_control_connection.cpp index f10934d63..7cca81384 100644 --- a/gtests/src/integration/tests/test_control_connection.cpp +++ b/gtests/src/integration/tests/test_control_connection.cpp @@ -273,8 +273,10 @@ CASSANDRA_INTEGRATION_TEST_F(ControlConnectionTwoNodeClusterTests, Reconnection) * and ensure only the first node is used as the contact point for automatic * node discovery of the second node */ - Cluster cluster = default_cluster().with_load_balance_round_robin().with_contact_points( - generate_contact_points(ccm_->get_ip_prefix(), 1)); + Cluster cluster = default_cluster() + .with_load_balance_round_robin() + .with_constant_reconnect(100) + .with_contact_points(generate_contact_points(ccm_->get_ip_prefix(), 1)); Session session = cluster.connect(); // Stop the first node and bootstrap a third node into the cluster @@ -554,7 +556,8 @@ 
CASSANDRA_INTEGRATION_TEST_F(ControlConnectionThreeNodeClusterTests, NodeDiscove */ CASSANDRA_INTEGRATION_TEST_F(ControlConnectionTests, FullOutage) { CHECK_FAILURE; - connect(); // Create the default session + Cluster cluster = default_cluster().with_constant_reconnect(100); + connect(cluster); // Stop the cluster and attempt to perform a request ccm_->stop_cluster(); @@ -567,12 +570,11 @@ CASSANDRA_INTEGRATION_TEST_F(ControlConnectionTests, FullOutage) { for (unsigned short i = 0; i < cluster_ip_addresses.size(); ++i) { nodes.insert(i + 1); } - reset_logger_criteria("Scheduling reconnect for host ", nodes); + reset_logger_criteria("reconnect for host ", nodes); // Restart the cluster and wait for the nodes to reconnect ccm_->start_cluster(); ASSERT_TRUE(wait_for_logger(nodes.size())); - msleep(3000); // TODO: Remove static sleep and check driver logs for reduced wait // Ensure all nodes are actively used std::set expected_nodes; diff --git a/gtests/src/integration/tests/test_dbaas.cpp b/gtests/src/integration/tests/test_dbaas.cpp new file mode 100644 index 000000000..c82b5e10c --- /dev/null +++ b/gtests/src/integration/tests/test_dbaas.cpp @@ -0,0 +1,746 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "integration.hpp" + +#include "process.hpp" + +#define PROXY_CREDS_V1_INVALID_CA_FILENAME "creds-v1-invalid-ca.zip" +#define PROXY_CREDS_V1_UNREACHABLE_FILENAME "creds-v1-unreachable.zip" +#define PROXY_CREDS_V1_NO_CERT_FILENAME "creds-v1-wo-cert.zip" +#define PROXY_CREDS_V1_NO_CREDS_FILENAME "creds-v1-wo-creds.zip" +#define PROXY_CREDS_V1_FILENAME "creds-v1.zip" + +#ifdef WIN32 +#define PROXY_RUN_SCRIPT "run.ps1" +#define PROXY_CREDS_BUNDLES "certs\\bundles\\" +#else +#define PROXY_RUN_SCRIPT "run.sh" +#define PROXY_CREDS_BUNDLES "certs/bundles/" +#endif + +using test::Utils; +using utils::Process; + +/** + * Database as a service integration tests + */ +class DbaasTests : public Integration { +public: + typedef std::map<int, std::string> ServerNames; + typedef std::pair<int, std::string> ServerPair; + + static void SetUpTestCase() { + char* proxy_path = getenv("PROXY_PATH"); + if (proxy_path) { + proxy_path_ = proxy_path; + } else { + proxy_path_ = Utils::home_directory() + Utils::PATH_SEPARATOR + "proxy"; + } + proxy_path_ += Utils::PATH_SEPARATOR; + proxy_run_script_ = proxy_path_ + PROXY_RUN_SCRIPT; + + // Allow the proxy to start itself or use a currently running proxy + if (file_exists(proxy_run_script_)) { + if (!start_proxy()) { + FAIL() << "Unable to start SNI single endpoint proxy service. Check PROXY_PATH environment " + "variable" +#ifdef WIN32 + << " or ensure proper ExecutionPolicy is set (e.g. Set-ExecutionPolicy -Scope " + "CurrentUser Unrestricted); see " + "https://go.microsoft.com/fwlink/?LinkID=135170" +#endif + << "."; + } + } else { + if (!is_proxy_running()) { + FAIL() + << "SNI single endpoint proxy is not available. 
Start container before executing test."; + } + } + + if (!file_exists(proxy_cred_bundles_path_)) { + proxy_cred_bundles_path_ = proxy_path_ + proxy_cred_bundles_path_; + } + if (!file_exists(creds_v1_invalid_ca()) || !file_exists(creds_v1_unreachable()) || + !file_exists(creds_v1_no_cert()) || !file_exists(creds_v1_no_creds()) || + !file_exists(creds_v1())) { + FAIL() << "Unable to locate SNI single endpoint credential bundles. Check PROXY_PATH " + "environment variable."; + } + } + + void SetUp() { + // Ensure CCM and session are not created for these tests + is_ccm_requested_ = false; + is_session_requested_ = false; + is_schema_metadata_ = true; // Needed for prepared statements + Integration::SetUp(); + } + + static void TearDownTestCase() { + if (!Options::keep_clusters()) { + stop_proxy(); + } + } + + static std::string creds_v1_invalid_ca() { + return proxy_cred_bundles_path_ + PROXY_CREDS_V1_INVALID_CA_FILENAME; + } + + static std::string creds_v1_unreachable() { + return proxy_cred_bundles_path_ + PROXY_CREDS_V1_UNREACHABLE_FILENAME; + } + + static std::string creds_v1_no_cert() { + return proxy_cred_bundles_path_ + PROXY_CREDS_V1_NO_CERT_FILENAME; + } + + static std::string creds_v1_no_creds() { + return proxy_cred_bundles_path_ + PROXY_CREDS_V1_NO_CREDS_FILENAME; + } + + static std::string creds_v1() { return proxy_cred_bundles_path_ + PROXY_CREDS_V1_FILENAME; } + + int get_node_id(const std::string& rpc_address) { + std::vector octects = explode(rpc_address, '.'); + std::stringstream ss(octects[octects.size() - 1]); + int node = 0; + if ((ss >> node).fail()) { + EXPECT_TRUE(false) << "Unable to parse node number from rpc_address"; + } + return node; + } + + /** + * Vector of server names sorted by node number (e.g. last octet in real IP address) + */ + ServerNames get_server_names() { + ServerNames map; + { + Cluster cluster = default_cluster(false) + .with_randomized_contact_points(false) + .with_load_balance_round_robin(); + EXPECT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1().c_str())); + Session session = cluster.connect(); + for (int i = 0; i < 3; ++i) { + Row row = session.execute(SELECT_ALL_SYSTEM_LOCAL_CQL).first_row(); + int node = get_node_id(row.column_by_name("rpc_address").str()); + map.insert(ServerPair(node, row.column_by_name("host_id").str())); + } + } + return map; + } + + bool start_cluster() { + Process::Args args; + args.push_back("start"); + args.push_back("--root"); + args.push_back("--wait-for-binary-proto"); + args.push_back("--jvm_arg=-Ddse.product_type=DATASTAX_APOLLO"); + return ccm_execute(args); + } + + bool stop_cluster() { + Process::Args args; + args.push_back("stop"); + return ccm_execute(args); + } + + bool start_node(int node) { + Process::Args args; + args.push_back(node_name(node)); + args.push_back("start"); + args.push_back("--root"); + args.push_back("--wait-for-binary-proto"); + args.push_back("--jvm_arg=-Ddse.product_type=DATASTAX_APOLLO"); + return ccm_execute(args); + } + + bool stop_node(int node) { + Process::Args args; + args.push_back(node_name(node)); + args.push_back("stop"); + return ccm_execute(args); + } + +private: + std::string node_name(int node) { + std::stringstream node_name; + node_name << "node" << node; + return node_name.str(); + } + + bool ccm_execute(Process::Args args) { + Process::Args command; + command.push_back("docker"); + command.push_back("exec"); + command.push_back(get_proxy_id()); + command.push_back("ccm"); + command.insert(command.end(), 
args.begin(), args.end()); + Process::Result result = Process::execute(command); + return result.exit_status == 0; + } + +private: + static std::string get_proxy_id() { + if (proxy_id_.empty()) { + Process::Args command; + command.push_back("docker"); + command.push_back("ps"); + command.push_back("-aqf"); + command.push_back("ancestor=single_endpoint"); + Process::Result result = Process::execute(command); + proxy_id_ = Utils::trim(result.standard_output); + } + return proxy_id_; + } + + static bool is_proxy_running() { return !get_proxy_id().empty(); } + + static bool start_proxy() { + if (is_proxy_running()) return true; + + Process::Args command; +#ifdef WIN32 + command.push_back("powershell"); +#endif + command.push_back(proxy_run_script_); + Process::Result result = Process::execute(command); + return result.exit_status == 0; + } + + static bool stop_proxy() { + Process::Args command; + command.push_back("docker"); + command.push_back("kill"); + command.push_back(get_proxy_id()); + Process::Result result = Process::execute(command); + return result.exit_status == 0; + } + +private: + static std::string proxy_path_; + static std::string proxy_cred_bundles_path_; + static std::string proxy_run_script_; + static std::string proxy_id_; +}; + +std::string DbaasTests::proxy_path_; +std::string DbaasTests::proxy_cred_bundles_path_ = PROXY_CREDS_BUNDLES; +std::string DbaasTests::proxy_run_script_ = PROXY_RUN_SCRIPT; +std::string DbaasTests::proxy_id_; + +/** + * Perform connection to DBaaS SNI single endpoint docker image. + * + * This test will perform a connection to a DBaaS SNI single endpoint while ensuring proper + * automatic cloud configuration with address resolution. + * + * @jira_ticket CPP-787 + * @test_category dbaas + * @since 2.14.0 + * @expected_result Successful address resolution and connection. + */ +CASSANDRA_INTEGRATION_TEST_F(DbaasTests, ResolveAndConnect) { + CHECK_FAILURE; + + Cluster cluster = default_cluster(false); + ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1().c_str())); + connect(cluster); +} + +/** + * Perform query using a simple statement against the DBaaS SNI single endpoint docker image. + * + * This test will perform a connection and execute a simple statement query against the + * system.local table to ensure query execution to a DBaaS SNI single endpoint while validating the + * results. + * + * @jira_ticket CPP-787 + * @test_category dbaas + * @test_category queries + * @since 2.14.0 + * @expected_result Simple statement is executed and nodes are validated. 
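+ * Note: each query below verifies that the reported server name matches the host_id returned by the node that served the request.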
+ */ +CASSANDRA_INTEGRATION_TEST_F(DbaasTests, QueryEachNode) { + CHECK_FAILURE; + + Cluster cluster = default_cluster(false).with_load_balance_round_robin(); + ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1().c_str())); + connect(cluster); + + ServerNames server_names; + for (int i = 0; i < 3; ++i) { + Result result = session_.execute(SELECT_ALL_SYSTEM_LOCAL_CQL); + Uuid expected_host_id = Uuid(result.server_name()); + Row row = result.first_row(); + + Uuid host_id = row.column_by_name("host_id"); + int node = get_node_id(row.column_by_name("rpc_address").str()); + EXPECT_NE(0, node); + EXPECT_EQ(expected_host_id, host_id); + server_names.insert(ServerPair(node, host_id.str())); + } + + EXPECT_EQ(3u, server_names.size()); // Ensure all three nodes were queried +} + +/** + * Create function and aggregate definitions and ensure the schema metadata is reflected when + * execute against the DBaaS SNI single endpoint docker image. + * + * This test will perform a connection and execute create function/aggregate queries to ensure + * schema metadata using a DBaaS SNI single endpoint is handled properly. + * + * @jira_ticket CPP-815 + * @test_category dbaas + * @test_category queries:schema_metadata:udf + * @since 2.14.0 + * @expected_result Function/Aggregate definitions schema metadata are validated. + */ +CASSANDRA_INTEGRATION_TEST_F(DbaasTests, SchemaMetadata) { + CHECK_FAILURE; + + Cluster cluster = default_cluster(false); + ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1().c_str())); + connect(cluster); + + // clang-format off + session_.execute("CREATE OR REPLACE FUNCTION avg_state(state tuple, val int) " + "CALLED ON NULL INPUT RETURNS tuple " + "LANGUAGE java AS " + "'if (val != null) {" + "state.setInt(0, state.getInt(0) + 1);" + "state.setLong(1, state.getLong(1) + val.intValue());" + "};" + "return state;'" + ";"); + session_.execute("CREATE OR REPLACE FUNCTION avg_final (state tuple) " + "CALLED ON NULL INPUT RETURNS double " + "LANGUAGE java AS " + "'double r = 0;" + "if (state.getInt(0) == 0) return null;" + "r = state.getLong(1);" + "r /= state.getInt(0);" + "return Double.valueOf(r);'" + ";"); + session_.execute("CREATE OR REPLACE AGGREGATE average(int) " + "SFUNC avg_state STYPE tuple FINALFUNC avg_final " + "INITCOND(0, 0);"); + // clang-format on + + const CassSchemaMeta* schema_meta = cass_session_get_schema_meta(session_.get()); + ASSERT_TRUE(schema_meta != NULL); + const CassKeyspaceMeta* keyspace_meta = + cass_schema_meta_keyspace_by_name(schema_meta, default_keyspace().c_str()); + ASSERT_TRUE(keyspace_meta != NULL); + + { // Function `avg_state` + const char* data = NULL; + size_t length = 0; + const CassDataType* datatype = NULL; + + const CassFunctionMeta* function_meta = + cass_keyspace_meta_function_by_name(keyspace_meta, "avg_state", "tuple,int"); + ASSERT_TRUE(function_meta != NULL); + cass_function_meta_name(function_meta, &data, &length); + EXPECT_EQ("avg_state", std::string(data, length)); + cass_function_meta_full_name(function_meta, &data, &length); + EXPECT_EQ("avg_state(tuple,int)", std::string(data, length)); + cass_function_meta_body(function_meta, &data, &length); + EXPECT_EQ("if (val != null) {state.setInt(0, state.getInt(0) + 1);state.setLong(1, " + "state.getLong(1) + val.intValue());};return state;", + std::string(data, length)); + cass_function_meta_language(function_meta, &data, &length); + EXPECT_EQ("java", std::string(data, 
length)); + EXPECT_TRUE(cass_function_meta_called_on_null_input(function_meta)); + ASSERT_EQ(2u, cass_function_meta_argument_count(function_meta)); + cass_function_meta_argument(function_meta, 0, &data, &length, &datatype); + EXPECT_EQ("state", std::string(data, length)); + EXPECT_EQ(CASS_VALUE_TYPE_TUPLE, cass_data_type_type(datatype)); + ASSERT_EQ(2u, cass_data_type_sub_type_count(datatype)); + EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(cass_data_type_sub_data_type(datatype, 0))); + EXPECT_EQ(CASS_VALUE_TYPE_BIGINT, + cass_data_type_type(cass_data_type_sub_data_type(datatype, 1))); + cass_function_meta_argument(function_meta, 1, &data, &length, &datatype); + EXPECT_EQ("val", std::string(data, length)); + EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(datatype)); + datatype = cass_function_meta_argument_type_by_name(function_meta, "state"); + EXPECT_EQ(CASS_VALUE_TYPE_TUPLE, cass_data_type_type(datatype)); + ASSERT_EQ(2u, cass_data_type_sub_type_count(datatype)); + EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(cass_data_type_sub_data_type(datatype, 0))); + EXPECT_EQ(CASS_VALUE_TYPE_BIGINT, + cass_data_type_type(cass_data_type_sub_data_type(datatype, 1))); + datatype = cass_function_meta_argument_type_by_name(function_meta, "val"); + EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(datatype)); + datatype = cass_function_meta_return_type(function_meta); + EXPECT_EQ(CASS_VALUE_TYPE_TUPLE, cass_data_type_type(datatype)); + ASSERT_EQ(2u, cass_data_type_sub_type_count(datatype)); + EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(cass_data_type_sub_data_type(datatype, 0))); + EXPECT_EQ(CASS_VALUE_TYPE_BIGINT, + cass_data_type_type(cass_data_type_sub_data_type(datatype, 1))); + } + + { // Aggregate `average` + const char* data = NULL; + size_t length = 0; + const CassDataType* datatype = NULL; + + const CassAggregateMeta* aggregate_meta = + cass_keyspace_meta_aggregate_by_name(keyspace_meta, "average", "int"); + ASSERT_TRUE(aggregate_meta != NULL); + cass_aggregate_meta_name(aggregate_meta, &data, &length); + EXPECT_EQ("average", std::string(data, length)); + cass_aggregate_meta_full_name(aggregate_meta, &data, &length); + EXPECT_EQ("average(int)", std::string(data, length)); + size_t count = cass_aggregate_meta_argument_count(aggregate_meta); + ASSERT_EQ(1u, cass_aggregate_meta_argument_count(aggregate_meta)); + datatype = cass_aggregate_meta_argument_type(aggregate_meta, 0); + EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(datatype)); + datatype = cass_aggregate_meta_return_type(aggregate_meta); + EXPECT_EQ(CASS_VALUE_TYPE_DOUBLE, cass_data_type_type(datatype)); + datatype = cass_aggregate_meta_state_type(aggregate_meta); + EXPECT_EQ(CASS_VALUE_TYPE_TUPLE, cass_data_type_type(datatype)); + ASSERT_EQ(2u, cass_data_type_sub_type_count(datatype)); + EXPECT_EQ(CASS_VALUE_TYPE_INT, cass_data_type_type(cass_data_type_sub_data_type(datatype, 0))); + EXPECT_EQ(CASS_VALUE_TYPE_BIGINT, + cass_data_type_type(cass_data_type_sub_data_type(datatype, 1))); + const CassFunctionMeta* function_meta = cass_aggregate_meta_state_func(aggregate_meta); + cass_function_meta_name(function_meta, &data, &length); + EXPECT_EQ("avg_state", std::string(data, length)); + function_meta = cass_aggregate_meta_final_func(aggregate_meta); + cass_function_meta_name(function_meta, &data, &length); + EXPECT_EQ("avg_final", std::string(data, length)); + const CassValue* initcond = cass_aggregate_meta_init_cond(aggregate_meta); + EXPECT_EQ(CASS_VALUE_TYPE_VARCHAR, cass_value_type(initcond)); + 
EXPECT_EQ(Text("(0, 0)"), Text(initcond)); + ASSERT_TRUE(true); + } + + cass_schema_meta_free(schema_meta); +} + +/** + * Ensure guardrails are enabled when performing a query against the DBaaS SNI single endpoint + * docker image. + * + * This test will perform a connection to the DBaaS SNI single endpoint and execute a simple + * insert statement against the server using a valid consistency level while validating that the + * insert occurred. + * + * @jira_ticket CPP-813 + * @test_category dbaas + * @test_category queries:guard_rails + * @since 2.14.0 + * @expected_result Simple statement is executed and is validated. + */ +CASSANDRA_INTEGRATION_TEST_F(DbaasTests, ConsistencyGuardrails) { + CHECK_FAILURE; + + Cluster cluster = default_cluster(false); + ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1().c_str())); + connect(cluster); + + session_.execute( + format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, default_table().c_str(), "int", "int")); + CHECK_FAILURE; + + session_.execute(Statement( + format_string(CASSANDRA_KEY_VALUE_INSERT_FORMAT, default_table().c_str(), "0", "1"))); + Result result = session_.execute( + Statement(format_string(CASSANDRA_SELECT_VALUE_FORMAT, default_table().c_str(), "0"))); + EXPECT_EQ(1u, result.row_count()); + ASSERT_EQ(1u, result.column_count()); + ASSERT_EQ(Integer(1), result.first_row().next().as<Integer>()); +} + +/** + * Ensure guardrails are enabled when performing a query against the DBaaS SNI single endpoint + * docker image. + * + * This test will perform a connection to the DBaaS SNI single endpoint and execute a simple + * statement against the server using an invalid consistency level while validating the resulting + * error. + * + * @jira_ticket CPP-813 + * @test_category dbaas + * @test_category queries:guard_rails + * @since 2.14.0 + * @expected_result Simple statement is executed and guardrail error is validated. + */ +CASSANDRA_INTEGRATION_TEST_F(DbaasTests, ConsistencyGuardrailsInvalid) { + CHECK_FAILURE; + + Cluster cluster = default_cluster(false); + ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1().c_str())); + connect(cluster); + + session_.execute( + format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, default_table().c_str(), "int", "int")); + CHECK_FAILURE + + Statement statement( + format_string(CASSANDRA_KEY_VALUE_INSERT_FORMAT, default_table().c_str(), "0", "1")); + statement.set_consistency( + CASS_CONSISTENCY_LOCAL_ONE); // Override default DBaaS configured consistency + Result result = session_.execute(statement, false); + EXPECT_TRUE(result.error_code() != CASS_OK) + << "Statement execution succeeded; guardrails may not be enabled"; + EXPECT_TRUE(contains(result.error_message(), + "Provided value LOCAL_ONE is not allowed for Write Consistency Level")); +} + +/** + * Perform query ensuring token aware is enabled by default. + * + * This test will perform a connection and execute an insert query to ensure that token aware + * routing is enabled by default when automatically configured. + * + * @jira_ticket CPP-787 + * @test_category dbaas + * @test_category queries + * @since 2.14.0 + * @expected_result Simple statement is executed and validated against replicas. 
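+ * Note: the key-to-node assignments listed in the test body are assumed fixed for the three-node SNI single endpoint cluster used by these tests.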
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, DcAwareTokenAwareRoutingDefault) {
+  CHECK_FAILURE;
+
+  ServerNames server_names = get_server_names();
+
+  // Validate replicas are used during token aware routing
+  std::vector<std::pair<int, int> > replicas;
+  replicas.push_back(std::pair<int, int>(0, 2)); // query key, node id (last octet of rpc_address)
+  replicas.push_back(std::pair<int, int>(1, 2));
+  replicas.push_back(std::pair<int, int>(2, 2));
+  replicas.push_back(std::pair<int, int>(3, 1));
+  replicas.push_back(std::pair<int, int>(4, 3));
+  replicas.push_back(std::pair<int, int>(5, 2));
+
+  Cluster cluster = default_cluster(false);
+  ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+                         cluster.get(), creds_v1().c_str()));
+  connect(cluster);
+
+  for (std::vector<std::pair<int, int> >::iterator it = replicas.begin(), end = replicas.end();
+       it != end; ++it) {
+    Statement statement(SELECT_ALL_SYSTEM_LOCAL_CQL, 1);
+    statement.set_consistency(CASS_CONSISTENCY_ONE);
+    statement.add_key_index(0);
+    statement.set_keyspace("system");
+    statement.bind(0, Integer(it->first));
+
+    Result result = session_.execute(
+        statement, false); // No bind variables exist so statement will return error
+    EXPECT_EQ(server_names[it->second], result.server_name());
+  }
+}
+
+/**
+ * Attempt connection to DBaaS SNI single endpoint docker image manually setting auth.
+ *
+ * This test will perform a connection to a DBaaS SNI single endpoint while ensuring proper
+ * automatic cloud configuration with address resolution where the authentication is not available.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas:auth
+ * @since 2.14.0
+ * @expected_result Successful address resolution and connection.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, ResolveAndConnectWithoutCredsInBundle) {
+  CHECK_FAILURE;
+
+  Cluster cluster = default_cluster(false);
+  ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+                         cluster.get(), creds_v1_no_creds().c_str()));
+  cluster.with_credentials("cassandra", "cassandra");
+  connect(cluster);
+}
+
+/**
+ * Attempt connection to DBaaS SNI single endpoint docker image leaving auth unset.
+ *
+ * This test will perform a connection to a DBaaS SNI single endpoint while ensuring proper
+ * automatic cloud configuration with address resolution where the authentication is not set.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @since 2.14.0
+ * @expected_result Failed to establish a connection.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, InvalidWithoutCreds) {
+  CHECK_FAILURE;
+
+  Cluster cluster = default_cluster(false);
+  ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+                         cluster.get(), creds_v1_no_creds().c_str()));
+  try {
+    connect(cluster);
+    EXPECT_TRUE(false) << "Connection established";
+  } catch (Session::Exception& se) {
+    EXPECT_EQ(CASS_ERROR_SERVER_BAD_CREDENTIALS, se.error_code());
+  }
+}
+
+/**
+ * Attempt connection to DBaaS SNI single endpoint docker image using invalid metadata server.
+ *
+ * This test will attempt a connection to a DBaaS SNI single endpoint using an invalid metadata
+ * server. The connection should not succeed as no resolution will be possible.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @since 2.14.0
+ * @expected_result Failed to establish a connection.
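+ *
+ * Illustrative sketch (editor's aside, not part of this patch): how the same failure surfaces
+ * through the plain C API, assuming the usual future-based connect flow:
+ *
+ *   CassFuture* connect_future = cass_session_connect(session, cluster);
+ *   if (cass_future_error_code(connect_future) == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE) {
+ *     /* metadata service could not be resolved or contacted */
+ *   }
+ *   cass_future_free(connect_future);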
+ */ +CASSANDRA_INTEGRATION_TEST_F(DbaasTests, InvalidMetadataServer) { + CHECK_FAILURE; + + Cluster cluster = default_cluster(false); + EXPECT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1_unreachable().c_str())); + try { + connect(cluster); + EXPECT_TRUE(false) << "Connection established"; + } catch (Session::Exception& se) { + EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, se.error_code()); + } +} + +/** + * Attempt connection to DBaaS SNI single endpoint docker image using invalid certificate. + * + * This test will attempt a connection to a DBaaS SNI single endpoint using an invalid certificate. + * The connection should not succeed as no resolution will be possible. + * + * @jira_ticket CPP-787 + * @test_category dbaas + * @since 2.14.0 + * @expected_result Failed to establish a connection. + */ +CASSANDRA_INTEGRATION_TEST_F(DbaasTests, InvalidCertificate) { + CHECK_FAILURE; + + Cluster cluster = default_cluster(false); + EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, + cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1_no_cert().c_str())); + try { + connect(cluster); + EXPECT_TRUE(false) << "Connection established"; + } catch (Session::Exception& se) { + EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, se.error_code()); + } +} + +/** + * Attempt connection to DBaaS SNI single endpoint docker image using invalid CA. + * + * This test will attempt a connection to a DBaaS SNI single endpoint using an invalid CA. The + * connection should not succeed as no resolution will be possible. + * + * @jira_ticket CPP-787 + * @test_category dbaas + * @since 2.14.0 + * @expected_result Failed to establish a connection. + */ +CASSANDRA_INTEGRATION_TEST_F(DbaasTests, InvalidCertificateAuthority) { + CHECK_FAILURE; + + Cluster cluster = default_cluster(false); + ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1_invalid_ca().c_str())); + try { + connect(cluster); + EXPECT_TRUE(false) << "Connection established"; + } catch (Session::Exception& se) { + EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, se.error_code()); + } +} + +/** + * Perform query with nodes down against the DBaaS SNI single endpoint docker image. + * + * This test will perform a connection and execute a simple statement query against the + * system.local table to ensure query execution to a DBaaS SNI single endpoint while validating the + * results. + * + * @jira_ticket CPP-787 + * @test_category dbaas + * @test_category queries + * @since 2.14.0 + * @expected_result Simple statement is executed and validated while node(s) are down. + */ +CASSANDRA_INTEGRATION_TEST_F(DbaasTests, QueryWithNodesDown) { + CHECK_FAILURE; + + ServerNames server_names = get_server_names(); + + Cluster cluster = default_cluster(false); + ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster.get(), creds_v1().c_str())); + connect(cluster); + + EXPECT_TRUE(stop_node(1)); + for (int i = 0; i < 8; ++i) { + EXPECT_NE(server_names[1], session_.execute(SELECT_ALL_SYSTEM_LOCAL_CQL).server_name()); + } + + EXPECT_TRUE(stop_node(3)); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(server_names[2], session_.execute(SELECT_ALL_SYSTEM_LOCAL_CQL).server_name()); + } + + EXPECT_TRUE(start_cluster()); +} + +/** + * Ensure reconnection occurs during full outage. 
+ *
+ * This test will perform a connection, a full outage will occur, and the cluster will be restarted
+ * while executing a simple statement query against the system.local table to ensure reconnection
+ * after full outage.
+ *
+ * @jira_ticket CPP-787
+ * @test_category dbaas
+ * @test_category queries
+ * @since 2.14.0
+ * @expected_result Simple statement is executed and validated after full outage.
+ */
+CASSANDRA_INTEGRATION_TEST_F(DbaasTests, FullOutage) {
+  CHECK_FAILURE;
+
+  ServerNames server_names = get_server_names();
+
+  Cluster cluster = default_cluster(false).with_constant_reconnect(10); // Quick reconnect
+  ASSERT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(
+                         cluster.get(), creds_v1().c_str()));
+  connect(cluster);
+
+  EXPECT_TRUE(stop_cluster());
+
+  Statement statement(SELECT_ALL_SYSTEM_LOCAL_CQL);
+  EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, session_.execute(statement, false).error_code());
+
+  EXPECT_TRUE(start_cluster());
+  EXPECT_EQ(CASS_OK, session_.execute(statement).error_code());
+}
diff --git a/gtests/src/integration/tests/test_exec_profile.cpp b/gtests/src/integration/tests/test_exec_profile.cpp
index ec565f2c8..8f23b1ae9 100644
--- a/gtests/src/integration/tests/test_exec_profile.cpp
+++ b/gtests/src/integration/tests/test_exec_profile.cpp
@@ -302,8 +302,15 @@ CASSANDRA_INTEGRATION_TEST_F(ExecutionProfileTest, Consistency) {
   batch.set_execution_profile("consistency");
   result = session_.execute(batch, false);
   ASSERT_EQ(CASS_ERROR_SERVER_INVALID_QUERY, result.error_code());
-  ASSERT_TRUE(contains(result.error_message(),
-                       "SERIAL is not supported as conditional update commit consistency"));
+  CCM::CassVersion cass_version = server_version_;
+  if (!Options::is_cassandra()) {
+    cass_version = static_cast<CCM::DseVersion>(cass_version).get_cass_version();
+  }
+  std::string expected_message = "SERIAL is not supported as conditional update commit consistency";
+  if (cass_version >= "4.0.0") {
+    expected_message = "You must use conditional updates for serializable writes";
+  }
+  ASSERT_TRUE(contains(result.error_message(), expected_message));
 
   // Execute a simple query with assigned profile (should fail)
   insert_.set_execution_profile("consistency");
diff --git a/gtests/src/integration/tests/test_null_string_params.cpp b/gtests/src/integration/tests/test_null_string_params.cpp
index 7a618b376..b44564376 100644
--- a/gtests/src/integration/tests/test_null_string_params.cpp
+++ b/gtests/src/integration/tests/test_null_string_params.cpp
@@ -96,7 +96,7 @@ class SchemaNullStringApiArgsTest : public NullStringApiArgsTest {
 
     if (server_version_ >= "3.0.0") {
       session_.execute(format_string("CREATE MATERIALIZED VIEW %s "
-                                     "AS SELECT value "
+                                     "AS SELECT value, key "
                                      " FROM %s"
                                      " WHERE value IS NOT NULL and key IS NOT NULL "
                                      "PRIMARY KEY(value, key)",
@@ -319,9 +319,9 @@ CASSANDRA_INTEGRATION_TEST_F(SchemaNullStringApiArgsTest, MaterializedViewMetaFu
  */
 CASSANDRA_INTEGRATION_TEST_F(SchemaNullStringApiArgsTest, FunctionAndAggregateMetaFunctions) {
   CHECK_VERSION(2.2.0);
-  // C* 3.x and later annotate collection columns as frozen.
+  // C* 3.x annotates collection columns as frozen.
   const CassFunctionMeta* function_meta =
-      (schema_meta_.version().major_version >= 3)
+      (schema_meta_.version().major_version == 3)
           ? cass_keyspace_meta_function_by_name(keyspace_meta_.get(), "avg_final",
                                                 "frozen<tuple<int, bigint>>")
           : cass_keyspace_meta_function_by_name(keyspace_meta_.get(), "avg_final",
diff --git a/gtests/src/integration/tests/test_prepared.cpp b/gtests/src/integration/tests/test_prepared.cpp
new file mode 100644
index 000000000..4dd52158d
--- /dev/null
+++ b/gtests/src/integration/tests/test_prepared.cpp
@@ -0,0 +1,110 @@
+/*
+  Copyright (c) DataStax, Inc.
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+#include "integration.hpp"
+
+/**
+ * Prepared integration tests; common operations
+ */
+class PreparedTests : public Integration {
+  void SetUp() {
+    is_keyspace_change_requested_ = false;
+    Integration::SetUp();
+  }
+};
+
+/**
+ * Execute a statement that forces a re-prepare resulting in a new prepared ID that fails fast and
+ * returns an error.
+ *
+ * This test will create a new table, prepare a statement using a fully qualified query, update the
+ * default keyspace, then drop and re-create the table to force the server to invalidate the
+ * prepared ID. After the table is dropped the prepared statement will be used to execute an insert
+ * query that will result in an error being returned when re-using the original prepared statement.
+ *
+ * @see: https://issues.apache.org/jira/browse/CASSANDRA-15252 (Server version restriction may need
+ * to be added if/when the Apache Cassandra issue is addressed.)
+ *
+ * @test_category error
+ * @test_category queries:prepared
+ * @since core:2.14.0
+ * @expected_result Re-prepare will fail fast and return error.
+ */
+CASSANDRA_INTEGRATION_TEST_F(PreparedTests, FailFastWhenPreparedIDChangesDuringReprepare) {
+  CHECK_FAILURE;
+
+  // Create the table and initial prepared statement
+  session_.execute(format_string(CASSANDRA_KEY_VALUE_QUALIFIED_TABLE_FORMAT, keyspace_name_.c_str(),
+                                 table_name_.c_str(), "int", "int"));
+  Prepared insert_prepared =
+      session_.prepare(format_string(CASSANDRA_KEY_VALUE_QUALIFIED_INSERT_FORMAT,
+                                     keyspace_name_.c_str(), table_name_.c_str(), "?", "?"));
+
+  // Update the current keyspace for the session
+  ASSERT_TRUE(use_keyspace(keyspace_name_));
+
+  // Drop and re-create the table to invalidate the prepared statement on the server
+  drop_table(table_name_);
+  session_.execute(format_string(CASSANDRA_KEY_VALUE_QUALIFIED_TABLE_FORMAT, keyspace_name_.c_str(),
+                                 table_name_.c_str(), "int", "int"));
+
+  // Execute the insert statement and validate the error code
+  logger_.add_critera("ID mismatch while trying to prepare query");
+  Statement insert_statement = insert_prepared.bind();
+  insert_statement.bind(0, Integer(0));
+  insert_statement.bind(1, Integer(1));
+  Result result = session_.execute(insert_statement, false);
+  EXPECT_TRUE(contains(result.error_message(), "ID mismatch while trying to prepare query"));
+}
+
+/**
+ * Execute a statement that forces a re-prepare resulting in the same prepared ID.
+ *
+ * This test will connect to a cluster and use a keyspace, prepare a statement using an
+ * unqualified query, then drop and re-create the table to force the server to invalidate the
+ * prepared ID. After the table is dropped the prepared statement will be used to execute an insert
+ * query that will result in the statement being re-prepared and the insert statement succeeding.
+ *
+ * @test_category queries:prepared
+ * @since core:1.0.0
+ * @expected_result Re-prepare will correctly execute the insert statement.
+ */
+CASSANDRA_INTEGRATION_TEST_F(PreparedTests, PreparedIDUnchangedDuringReprepare) {
+  CHECK_FAILURE;
+
+  // Allow for unqualified queries
+  use_keyspace(keyspace_name_);
+
+  // Create the table and initial prepared statement
+  session_.execute(
+      format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, table_name_.c_str(), "int", "int"));
+  Prepared insert_prepared = session_.prepare(
+      format_string(CASSANDRA_KEY_VALUE_INSERT_FORMAT, table_name_.c_str(), "?", "?"));
+
+  // Drop and re-create the table to invalidate the prepared statement on the server
+  drop_table(table_name_);
+  session_.execute(
+      format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, table_name_.c_str(), "int", "int"));
+
+  // Execute the insert statement and validate success
+  logger_.add_critera("Prepared query with ID");
+  Statement insert_statement = insert_prepared.bind();
+  insert_statement.bind(0, Integer(0));
+  insert_statement.bind(1, Integer(1));
+  Result result = session_.execute(insert_statement, false);
+  EXPECT_EQ(CASS_OK, result.error_code());
+  EXPECT_EQ(1u, logger_.count());
+}
diff --git a/gtests/src/integration/tests/test_schema_metadata.cpp b/gtests/src/integration/tests/test_schema_metadata.cpp
index 8adbc07f7..29c74ecf3 100644
--- a/gtests/src/integration/tests/test_schema_metadata.cpp
+++ b/gtests/src/integration/tests/test_schema_metadata.cpp
@@ -61,7 +61,7 @@ class SchemaMetadataTest : public Integration {
 
     if (server_version_ >= "3.0.0") {
       session_.execute(format_string("CREATE MATERIALIZED VIEW %s "
-                                     "AS SELECT value "
+                                     "AS SELECT value, key "
                                      " FROM %s"
                                      " WHERE value IS NOT NULL and key IS NOT NULL "
                                      "PRIMARY KEY(value, key)",
diff --git a/gtests/src/integration/tests/test_session.cpp b/gtests/src/integration/tests/test_session.cpp
index 24234009c..3c01f2f6c 100644
--- a/gtests/src/integration/tests/test_session.cpp
+++ b/gtests/src/integration/tests/test_session.cpp
@@ -140,7 +140,7 @@ CASSANDRA_INTEGRATION_TEST_F(SessionTest, ExternalHostListener) {
   // Restart node 1 (up event)
   ccm_->start_node(1);
   CCM::CassVersion cass_version = this->server_version_;
-  if (Options::is_dse()) {
+  if (!Options::is_cassandra()) {
     cass_version = static_cast<CCM::DseVersion>(cass_version).get_cass_version();
   }
   if (cass_version >= "2.2") {
diff --git a/gtests/src/integration/tests/test_startup_options.cpp b/gtests/src/integration/tests/test_startup_options.cpp
index 087252e98..77d759038 100644
--- a/gtests/src/integration/tests/test_startup_options.cpp
+++ b/gtests/src/integration/tests/test_startup_options.cpp
@@ -19,7 +19,7 @@
 /**
  * Startup options integration tests
  */
-class StartupOptionssTests : public Integration {};
+class StartupOptionsTests : public Integration {};
 
 /**
  * Verify driver name and version are assigned in startup options.
@@ -34,10 +34,10 @@ class StartupOptionssTests : public Integration {};
 * @cassandra_version 4.0.0
 * @expected_result Driver startup options are validated.
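 *
 * Illustrative check (editor's aside, not part of this patch): the reported options can also be
 * inspected with a plain query against Cassandra 4.0's virtual table, assuming the column names
 * below:
 *
 *   CassStatement* clients = cass_statement_new(
 *       "SELECT driver_name, driver_version FROM system_views.clients", 0);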
*/ -CASSANDRA_INTEGRATION_TEST_F(StartupOptionssTests, DriverOptions) { +CASSANDRA_INTEGRATION_TEST_F(StartupOptionsTests, DriverOptions) { CHECK_FAILURE; CHECK_VERSION(4.0.0); - if (Options::is_dse()) { + if (!Options::is_cassandra()) { SKIP_TEST("Unsupported for DataStax Enterprise Version " << server_version_.to_string() << ": 'system_views.clients' is unavailable"); } diff --git a/gtests/src/integration/tests/test_statement.cpp b/gtests/src/integration/tests/test_statement.cpp index 20f1930ef..e51d5dd90 100644 --- a/gtests/src/integration/tests/test_statement.cpp +++ b/gtests/src/integration/tests/test_statement.cpp @@ -104,6 +104,11 @@ CASSANDRA_INTEGRATION_TEST_F(StatementTests, SetHostWhereHostIsDown) { EXPECT_EQ(result.error_code(), CASS_ERROR_LIB_NO_HOSTS_AVAILABLE); } +class StatementNoClusterTests : public StatementTests { +public: + StatementNoClusterTests() { is_ccm_requested_ = false; } +}; + /** * Set a host on a statement using valid host strings. * @@ -111,7 +116,7 @@ CASSANDRA_INTEGRATION_TEST_F(StatementTests, SetHostWhereHostIsDown) { * @test_category configuration * @expected_result Success */ -TEST(StatementTest, SetHostWithValidHostString) { +CASSANDRA_INTEGRATION_TEST_F(StatementNoClusterTests, SetHostWithValidHostString) { Statement statement(""); EXPECT_EQ(cass_statement_set_host(statement.get(), "127.0.0.1", 9042), CASS_OK); EXPECT_EQ(cass_statement_set_host(statement.get(), "::1", 9042), CASS_OK); @@ -127,7 +132,7 @@ TEST(StatementTest, SetHostWithValidHostString) { * @test_category configuration * @expected_result Failure with the bad parameters error. */ -TEST(StatementTest, SetHostWithInvalidHostString) { +CASSANDRA_INTEGRATION_TEST_F(StatementNoClusterTests, SetHostWithInvalidHostString) { Statement statement(""); EXPECT_EQ(cass_statement_set_host(statement.get(), "notvalid", 9042), CASS_ERROR_LIB_BAD_PARAMS); EXPECT_EQ(cass_statement_set_host(statement.get(), "", 9042), CASS_ERROR_LIB_BAD_PARAMS); @@ -141,7 +146,7 @@ TEST(StatementTest, SetHostWithInvalidHostString) { * @test_category configuration * @expected_result Success */ -TEST(StatementTest, SetHostWithValidHostInet) { +CASSANDRA_INTEGRATION_TEST_F(StatementNoClusterTests, SetHostWithValidHostInet) { Statement statement(""); CassInet valid; ASSERT_EQ(cass_inet_from_string("127.0.0.1", &valid), CASS_OK); @@ -162,7 +167,7 @@ TEST(StatementTest, SetHostWithValidHostInet) { * @test_category configuration * @expected_result Failure with the bad parameters error. */ -TEST(StatementTest, SetHostWithInvalidHostInet) { +CASSANDRA_INTEGRATION_TEST_F(StatementNoClusterTests, SetHostWithInvalidHostInet) { Statement statement(""); CassInet invalid; invalid.address_length = 3; // Only 4 or 16 is valid (IPv4 and IPv6) diff --git a/gtests/src/unit/http_server.cpp b/gtests/src/unit/http_server.cpp new file mode 100644 index 000000000..aa93df486 --- /dev/null +++ b/gtests/src/unit/http_server.cpp @@ -0,0 +1,121 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+#include "http_server.hpp"
+
+using datastax::String;
+using datastax::internal::Memory;
+using datastax::internal::OStringStream;
+using datastax::internal::ScopedMutex;
+using datastax::internal::core::Address;
+using datastax::internal::core::EventLoop;
+using datastax::internal::core::Task;
+
+String response(int status, const String& body = "", const String& content_type = "") {
+  OStringStream ss;
+  ss << "HTTP/1.0 " << status << " " << http_status_str(static_cast<http_status>(status)) << "\r\n";
+  if (!body.empty()) {
+    ss << "Content-Type: ";
+    if (content_type.empty()) {
+      ss << "text/plain";
+    } else {
+      ss << content_type;
+    }
+    ss << "\r\nContent-Length: " << body.size() << "\r\n\r\n" << body;
+  } else {
+    ss << "\r\n";
+  }
+
+  return ss.str();
+}
+
+using namespace mockssandra;
+using namespace mockssandra::http;
+
+void Server::listen() {
+  server_connection_->listen(&event_loop_group_);
+  server_connection_->wait_listen();
+}
+
+void Server::close() {
+  if (server_connection_) {
+    server_connection_->close();
+    server_connection_->wait_close();
+  }
+}
+
+bool Server::use_ssl(const String& key, const String& cert, const String& ca_cert /*= ""*/,
+                     bool require_client_cert /*= false*/) {
+  return server_connection_->use_ssl(key, cert, ca_cert, require_client_cert);
+}
+
+Server::ClientConnection::ClientConnection(internal::ServerConnection* server_connection,
+                                           Server* server)
+    : internal::ClientConnection(server_connection)
+    , path_(server->path())
+    , content_type_(server->content_type())
+    , response_body_(server->response_body())
+    , response_status_code_(server->response_status_code())
+    , enable_valid_response_(server->enable_valid_response())
+    , close_connnection_after_request_(server->close_connnection_after_request()) {
+  http_parser_init(&parser_, HTTP_REQUEST);
+  http_parser_settings_init(&parser_settings_);
+
+  parser_.data = this;
+  parser_settings_.on_url = on_url;
+}
+
+void Server::ClientConnection::on_read(const char* data, size_t len) {
+  request_ = String(data, len);
+  size_t parsed = http_parser_execute(&parser_, &parser_settings_, data, len);
+  if (parsed < static_cast<size_t>(len)) {
+    enum http_errno err = HTTP_PARSER_ERRNO(&parser_);
+    fprintf(stderr, "%s: %s\n", http_errno_name(err), http_errno_description(err));
+    close();
+  }
+}
+
+int Server::ClientConnection::on_url(http_parser* parser, const char* buf, size_t len) {
+  ClientConnection* self = static_cast<ClientConnection*>(parser->data);
+  self->handle_url(buf, len);
+  return 0;
+}
+
+void Server::ClientConnection::handle_url(const char* buf, size_t len) {
+  String path(buf, len);
+  if (path.substr(0, path.find("?")) == path_) { // Compare without query parameters
+    if (enable_valid_response_) {
+      if (response_body_.empty()) {
+        write(response(response_status_code_, request_)); // Echo response
+      } else {
+        write(response(response_status_code_, response_body_, content_type_));
+      }
+    } else {
+      write("Invalid HTTP server response");
+    }
+  } else {
+    write(response(404));
+  }
+  // From the HTTP/1.0 protocol specification:
+  //
+  // > When an Entity-Body is included with a message, the length of that body may be determined in
+  // > one of two ways. If a Content-Length header field is present, its value in bytes represents
+  // > the length of the Entity-Body. Otherwise, the body length is determined by the closing of the
+  // > connection by the server.
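+  //
+  // Illustrative example (editor's aside, not part of this patch) of the two response shapes
+  // produced by response() above, assuming a two-byte body "ok":
+  //
+  //   HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 2\r\n\r\nok
+  //   HTTP/1.0 200 OK\r\n\r\n   (no body; the peer detects the end when the connection closes)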
+ if (close_connnection_after_request_) { + close(); + } +} diff --git a/gtests/src/unit/http_server.hpp b/gtests/src/unit/http_server.hpp new file mode 100644 index 000000000..ed5f352d0 --- /dev/null +++ b/gtests/src/unit/http_server.hpp @@ -0,0 +1,122 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef HTTP_MOCK_SERVER_HPP +#define HTTP_MOCK_SERVER_HPP + +#define HTTP_MOCK_HOSTNAME "cpp-driver.hostname." +#define HTTP_MOCK_SERVER_IP "127.254.254.254" +#define HTTP_MOCK_SERVER_PORT 30443 + +#include "http_parser.h" +#include "mockssandra.hpp" +#include "string.hpp" + +namespace mockssandra { namespace http { + +/** + * Mockssandra HTTP server. + * + * If no response body is set then the default response will the be original request; e.g. echo HTTP + * server. + */ +class Server { +public: + Server() + : path_("/") + , content_type_("text/plain") + , response_status_code_(200) + , enable_valid_response_(true) + , close_connnection_after_request_(true) + , event_loop_group_(1, "HTTP Server") + , factory_(this) + , server_connection_(new internal::ServerConnection( + Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), factory_)) {} + + const String& path() const { return path_; } + const String& content_type() const { return content_type_; } + const String& response_body() const { return response_body_; } + int response_status_code() const { return response_status_code_; } + bool enable_valid_response() { return enable_valid_response_; } + bool close_connnection_after_request() { return close_connnection_after_request_; } + + void set_path(const String& path) { path_ = path; } + void set_content_type(const String& content_type) { content_type_ = content_type; } + void set_response_body(const String& response_body) { response_body_ = response_body; } + void set_response_status_code(int status_code) { response_status_code_ = status_code; } + void enable_valid_response(bool enable) { enable_valid_response_ = enable; } + void set_close_connnection_after_request(bool enable) { + close_connnection_after_request_ = enable; + } + + bool use_ssl(const String& key, const String& cert, const String& ca_cert = "", + bool require_client_cert = false); + + void listen(); + void close(); + +private: + class ClientConnection : public internal::ClientConnection { + public: + ClientConnection(internal::ServerConnection* server_connection, Server* server); + + virtual void on_read(const char* data, size_t len); + + private: + static int on_url(http_parser* parser, const char* buf, size_t len); + void handle_url(const char* buf, size_t len); + + private: + String path_; + String content_type_; + String response_body_; + int response_status_code_; + bool enable_valid_response_; + bool close_connnection_after_request_; + String request_; + http_parser parser_; + http_parser_settings parser_settings_; + }; + + class ClientConnectionFactory : public internal::ClientConnectionFactory { + public: + ClientConnectionFactory(Server* server) + : server_(server) {} + + virtual 
internal::ClientConnection* + create(internal::ServerConnection* server_connection) const { + return new ClientConnection(server_connection, server_); + } + + private: + Server* const server_; + }; + +private: + String path_; + String content_type_; + String response_body_; + int response_status_code_; + bool enable_valid_response_; + bool close_connnection_after_request_; + SimpleEventLoopGroup event_loop_group_; + ClientConnectionFactory factory_; + internal::ServerConnection::Ptr server_connection_; +}; + +}} // namespace mockssandra::http + +#endif diff --git a/gtests/src/unit/http_test.cpp b/gtests/src/unit/http_test.cpp new file mode 100644 index 000000000..5ceea413f --- /dev/null +++ b/gtests/src/unit/http_test.cpp @@ -0,0 +1,61 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "http_test.hpp" + +using namespace datastax; +using namespace datastax::internal::core; + +SocketSettings HttpTest::use_ssl(const String& cn, bool is_server_using_ssl /*= true*/) { + SocketSettings settings; + +#ifdef HAVE_OPENSSL + String ca_key = mockssandra::Ssl::generate_key(); + ca_cert_ = mockssandra::Ssl::generate_cert(ca_key, "CA"); + + key_ = mockssandra::Ssl::generate_key(); + cert_ = mockssandra::Ssl::generate_cert(key_, cn, ca_cert_, ca_key); + + String client_key = mockssandra::Ssl::generate_key(); + String client_cert = mockssandra::Ssl::generate_cert(client_key, cn, ca_cert_, ca_key); + + SslContext::Ptr ssl_context(SslContextFactory::create()); + + ssl_context->set_cert(client_cert.c_str(), client_cert.size()); + ssl_context->set_private_key(client_key.c_str(), client_key.size(), "", + 0); // No password expected for the private key + + ssl_context->add_trusted_cert(ca_cert_.c_str(), ca_cert_.size()); + + settings.ssl_context = ssl_context; + + if (is_server_using_ssl) { + server_.use_ssl(key_, cert_, ca_cert_, true); + } +#endif + + return settings; +} + +void HttpTest::use_ssl(const String& ca_cert, const String& ca_key, const String& cn) { +#ifdef HAVE_OPENSSL + key_ = mockssandra::Ssl::generate_key(); + cert_ = mockssandra::Ssl::generate_cert(key_, cn, ca_cert, ca_key); + ca_cert_ = ca_cert; + + server_.use_ssl(key_, cert_, ca_cert_, true); +#endif +} diff --git a/gtests/src/unit/http_test.hpp b/gtests/src/unit/http_test.hpp new file mode 100644 index 000000000..f4e9d8e7e --- /dev/null +++ b/gtests/src/unit/http_test.hpp @@ -0,0 +1,66 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef HTTP_SERVER_TEST_HPP +#define HTTP_SERVER_TEST_HPP + +#include "http_server.hpp" +#include "loop_test.hpp" +#include "socket_connector.hpp" + +class HttpTest : public LoopTest { +public: + ~HttpTest() { server_.close(); } + + const datastax::String& ca_cert() const { return ca_cert_; } + const datastax::String& cert() const { return cert_; } + const datastax::String& key() const { return key_; } + + void set_path(const datastax::String& path) { server_.set_path(path); } + + void set_content_type(const datastax::String& content_type) { + server_.set_content_type(content_type); + } + + void set_response_body(const datastax::String& response_body) { + server_.set_response_body(response_body); + } + + void set_response_status_code(int status_code) { server_.set_response_status_code(status_code); } + + void enable_valid_response(bool enable) { server_.enable_valid_response(enable); } + + void set_close_connnection_after_request(bool enable) { + server_.set_close_connnection_after_request(enable); + } + + void start_http_server() { server_.listen(); } + void stop_http_server() { server_.close(); } + + datastax::internal::core::SocketSettings use_ssl(const String& cn = HTTP_MOCK_HOSTNAME, + bool is_server_using_ssl = true); + + void use_ssl(const String& ca_cert, const String& ca_key, const String& cn); + +private: + datastax::String ca_cert_; + datastax::String cert_; + datastax::String key_; + + mockssandra::http::Server server_; +}; + +#endif diff --git a/gtests/src/unit/mockssandra.cpp b/gtests/src/unit/mockssandra.cpp index 2620cece0..f174f1481 100644 --- a/gtests/src/unit/mockssandra.cpp +++ b/gtests/src/unit/mockssandra.cpp @@ -25,11 +25,16 @@ #include "tracing_data_handler.hpp" // For tracing query #include "uuids.hpp" +#include +#include +#include + #ifdef WIN32 #include "winsock.h" #endif using datastax::internal::bind_callback; +using datastax::internal::Map; using datastax::internal::Memory; using datastax::internal::OStringStream; using datastax::internal::ScopedMutex; @@ -40,8 +45,48 @@ using datastax::internal::core::UuidGen; #define DSE_VERSION "6.7.1" #define DSE_CASSANDRA_VERSION "4.0.0.671" +#if defined(OPENSSL_VERSION_NUMBER) && \ + !defined(LIBRESSL_VERSION_NUMBER) // Required as OPENSSL_VERSION_NUMBER for LibreSSL is defined + // as 2.0.0 +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) +#define SSL_SERVER_METHOD TLS_server_method +#else +#define SSL_SERVER_METHOD SSLv23_server_method +#endif +#else +#if (LIBRESSL_VERSION_NUMBER >= 0x20302000L) +#define SSL_SERVER_METHOD TLS_server_method +#else +#define SSL_SERVER_METHOD SSLv23_server_method +#endif +#endif + namespace mockssandra { +static DH* dh_parameters() { + // Generated using the following command: `openssl dhparam -C 2048` + // Prime length of 2048 chosen to bypass client-side error: + // `SSL3_CHECK_CERT_AND_ALGORITHM:dh key too small` + + // Note: This is not generated, programmatically, using something like the following: + // `DH_generate_parameters_ex(dh, 2048, DH_GENERATOR_5, NULL)` + // because DH prime generation takes a *REALLY* long time. 
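+  //
+  // Illustrative usage (editor's aside, not part of this patch): these parameters are consumed
+  // further down in ServerConnection::use_ssl(), roughly as:
+  //
+  //   DH* dh = dh_parameters();
+  //   if (dh && SSL_CTX_set_tmp_dh(ssl_context, dh)) { /* ephemeral DH enabled */ }
+  //   DH_free(dh);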
+ static const char* dh_parameters_pem = + "-----BEGIN DH PARAMETERS-----\n" + "MIIBCAKCAQEAusYypYO7u8mHelHjpDuUy7hjBgPw/KS03iSRnP5SNMB6OxVFslXv\n" + "s6McqEf218Fqpzi18tWA7fq3fvlT+Nx1Tda+Za5C8o5niRYxHks5N+RfnnrFf7vn\n" + "0lxrzsXP6es08Ts/UGMsp1nEaCSd/gjDglPgjdC1V/KmBsbT+8IwpbzPPdir0/jA\n" + "r+DXssZRZl7JtymGHXPkXTSBhsqSHamfzGRnAQFWToKAinqAdhY7pN/8krwvRj04\n" + "VYp84xAy2M6mWWqUm/kokN9QjAiT/DZRxZK8VhY7O9+oATo7/YPCMd9Em417O13k\n" + "+F0o/8IMaQvpmtlAsLc2ZKwGqqG+HD2dOwIBAg==\n" + "-----END DH PARAMETERS-----"; + BIO* bio = BIO_new_mem_buf(const_cast(dh_parameters_pem), + -1); // Use null terminator for length + DH* dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL); + BIO_free(bio); + return dh; +} + String Ssl::generate_key() { EVP_PKEY* pkey = NULL; EVP_PKEY_CTX* pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL); @@ -63,7 +108,7 @@ String Ssl::generate_key() { return result; } -String Ssl::generate_cert(const String& key, String cn) { +String Ssl::generate_cert(const String& key, String cn, String ca_cert, String ca_key) { // Assign the proper default hostname if (cn.empty()) { #ifdef WIN32 @@ -85,6 +130,20 @@ String Ssl::generate_cert(const String& key, String cn) { BIO_free(bio); } + X509_REQ* x509_req = NULL; + if (!ca_cert.empty() && !ca_key.empty()) { + x509_req = X509_REQ_new(); + X509_REQ_set_version(x509_req, 2); + X509_REQ_set_pubkey(x509_req, pkey); + + X509_NAME* name = X509_REQ_get_subject_name(x509_req); + X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC, + reinterpret_cast("US"), -1, -1, 0); + X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC, + reinterpret_cast(cn.c_str()), -1, -1, 0); + X509_REQ_sign(x509_req, pkey, EVP_sha256()); + } + X509* x509 = X509_new(); X509_set_version(x509, 2); ASN1_INTEGER_set(X509_get_serialNumber(x509), 0); @@ -92,13 +151,62 @@ String Ssl::generate_cert(const String& key, String cn) { X509_gmtime_adj(X509_get_notAfter(x509), static_cast(60 * 60 * 24 * 365)); X509_set_pubkey(x509, pkey); - X509_NAME* name = X509_get_subject_name(x509); - X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC, reinterpret_cast("US"), - -1, -1, 0); - X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC, - reinterpret_cast(cn.c_str()), -1, -1, 0); - X509_set_issuer_name(x509, name); - X509_sign(x509, pkey, EVP_md5()); + if (x509_req) { + X509_set_subject_name(x509, X509_REQ_get_subject_name(x509_req)); + + X509* x509_ca = NULL; + { // Read CA from string + BIO* bio = BIO_new_mem_buf(const_cast(ca_cert.c_str()), ca_cert.length()); + if (!PEM_read_bio_X509(bio, &x509_ca, NULL, NULL)) { + X509_free(x509); + X509_REQ_free(x509_req); + BIO_free(bio); + return ""; + } + BIO_free(bio); + } + X509_set_issuer_name(x509, X509_get_issuer_name(x509_ca)); + + EVP_PKEY* pkey_ca = NULL; + { // Read key from string + BIO* bio = BIO_new_mem_buf(const_cast(ca_key.c_str()), ca_key.length()); + if (!PEM_read_bio_PrivateKey(bio, &pkey_ca, NULL, NULL)) { + BIO_free(bio); + X509_free(x509); + X509_free(x509_ca); + X509_REQ_free(x509_req); + return ""; + } + BIO_free(bio); + } + X509_sign(x509, pkey_ca, EVP_sha256()); + + X509_free(x509_ca); + EVP_PKEY_free(pkey_ca); + } else { + if (cn == "CA") { // Set the purpose as a CA certificate. 
+ X509_EXTENSION* x509_ex; + X509V3_CTX x509v3_ctx; + X509V3_set_ctx_nodb(&x509v3_ctx); + X509V3_set_ctx(&x509v3_ctx, x509, x509, NULL, NULL, 0); + x509_ex = X509V3_EXT_conf_nid(NULL, &x509v3_ctx, NID_basic_constraints, + const_cast("critical,CA:TRUE")); + if (!x509_ex) { + X509_free(x509); + X509_EXTENSION_free(x509_ex); + return ""; + } + X509_add_ext(x509, x509_ex, -1); + X509_EXTENSION_free(x509_ex); + } + X509_NAME* name = X509_get_subject_name(x509); + X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC, + reinterpret_cast("US"), -1, -1, 0); + X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC, + reinterpret_cast(cn.c_str()), -1, -1, 0); + X509_set_issuer_name(x509, name); + X509_sign(x509, pkey, EVP_sha256()); + } String result; { // Write cert into string @@ -111,6 +219,8 @@ String Ssl::generate_cert(const String& key, String cn) { } X509_free(x509); + if (x509_req) X509_REQ_free(x509_req); + EVP_PKEY_free(pkey); return result; @@ -125,6 +235,18 @@ static void print_ssl_error() { fprintf(stderr, "%s\n", buf); } +static X509* load_cert(const String& cert) { + X509* x509 = NULL; + BIO* bio = BIO_new_mem_buf(const_cast(cert.c_str()), cert.length()); + if (PEM_read_bio_X509(bio, &x509, NULL, NULL) == NULL) { + print_ssl_error(); + BIO_free(bio); + return NULL; + } + BIO_free(bio); + return x509; +} + struct WriteReq { WriteReq(const char* data, size_t len, ClientConnection* connection) : data(data, len) @@ -151,8 +273,7 @@ ClientConnection::ClientConnection(ServerConnection* server) , server_(server) , ssl_(server->ssl_context() ? SSL_new(server->ssl_context()) : NULL) , incoming_bio_(ssl_ ? BIO_new(BIO_s_mem()) : NULL) - , outgoing_bio_(ssl_ ? BIO_new(BIO_s_mem()) : NULL) - , handshake_state_(SSL_HANDSHAKE_INPROGRESS) { + , outgoing_bio_(ssl_ ? BIO_new(BIO_s_mem()) : NULL) { tcp_.init(server->loop()); if (ssl_) { SSL_set_bio(ssl_, incoming_bio_, outgoing_bio_); @@ -185,6 +306,13 @@ int ClientConnection::accept() { return uv_read_start(tcp_.as_stream(), on_alloc, on_read); } +const char* ClientConnection::sni_server_name() const { + if (ssl_) { + return SSL_get_servername(ssl_, TLSEXT_NAMETYPE_host_name); + } + return NULL; +} + void ClientConnection::on_close(uv_handle_t* handle) { ClientConnection* connection = static_cast(handle->data); connection->handle_close(); @@ -234,21 +362,8 @@ void ClientConnection::handle_write(int status) { close(); return; } - if (ssl_) { - switch (handshake_state_) { - case SSL_HANDSHAKE_INPROGRESS: - // Nothing to do - break; - case SSL_HANDSHAKE_DONE: - on_write(); - break; - case SSL_HANDSHAKE_FINAL_WRITE: - handshake_state_ = SSL_HANDSHAKE_DONE; - break; - } - } else { - on_write(); - } + + on_write(); } int ClientConnection::internal_write(const char* data, size_t len) { @@ -329,12 +444,10 @@ void ClientConnection::on_ssl_read(const char* data, size_t len) { internal_write(buf, num_bytes); } - if (is_handshake_done()) { - handshake_state_ = data_written ? 
SSL_HANDSHAKE_FINAL_WRITE : SSL_HANDSHAKE_DONE; + if (is_handshake_done() && data_written) { + return; // Handshake is not completed; ingore remaining data } - } - - if (is_handshake_done()) { + } else { char buf[SSL_BUF_SIZE]; while ((rc = SSL_read(ssl_, buf, sizeof(buf))) > 0) { on_read(buf, rc); @@ -369,41 +482,55 @@ uv_loop_t* ServerConnection::loop() { return event_loop_->loop(); } -bool ServerConnection::use_ssl(const String& key, const String& cert, const String& password) { +bool ServerConnection::use_ssl(const String& key, const String& cert, + const String& ca_cert /*= ""*/, + bool require_client_cert /*= false*/) { if (ssl_context_) { SSL_CTX_free(ssl_context_); } - if ((ssl_context_ = SSL_CTX_new(SSLv23_server_method())) == NULL) { + if ((ssl_context_ = SSL_CTX_new(SSL_SERVER_METHOD())) == NULL) { print_ssl_error(); return false; } - SSL_CTX_set_default_passwd_cb_userdata(ssl_context_, (void*)password.c_str()); + SSL_CTX_set_default_passwd_cb_userdata(ssl_context_, (void*)""); SSL_CTX_set_default_passwd_cb(ssl_context_, on_password); + SSL_CTX_set_verify(ssl_context_, SSL_VERIFY_NONE, NULL); - X509* x509 = NULL; - { // Read cert from string - BIO* bio = BIO_new_mem_buf(const_cast(cert.c_str()), cert.length()); - if (PEM_read_bio_X509(bio, &x509, NULL, NULL) == NULL) { + { // Load server certificate + X509* x509 = load_cert(cert); + if (!x509) return false; + if (SSL_CTX_use_certificate(ssl_context_, x509) <= 0) { print_ssl_error(); - BIO_free(bio); + X509_free(x509); return false; } - BIO_free(bio); + X509_free(x509); } - if (SSL_CTX_use_certificate(ssl_context_, x509) <= 0) { - print_ssl_error(); - X509_free(x509); - return false; + if (!ca_cert.empty()) { // Load CA certificate + X509* x509 = load_cert(ca_cert); + if (!x509) return false; + if (SSL_CTX_add_extra_chain_cert(ssl_context_, x509) <= 0) { // Certificate freed by function + print_ssl_error(); + X509_free(x509); + return false; + } + if (require_client_cert) { + X509_STORE* cert_store = SSL_CTX_get_cert_store(ssl_context_); + if (X509_STORE_add_cert(cert_store, x509) <= 0) { + print_ssl_error(); + return false; + } + SSL_CTX_set_verify(ssl_context_, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL); + } } - X509_free(x509); EVP_PKEY* pkey = NULL; { // Read key from string BIO* bio = BIO_new_mem_buf(const_cast(key.c_str()), key.length()); - if (PEM_read_bio_PrivateKey(bio, &pkey, on_password, (void*)password.c_str()) == NULL) { + if (PEM_read_bio_PrivateKey(bio, &pkey, on_password, (void*)"") == NULL) { print_ssl_error(); BIO_free(bio); return false; @@ -418,11 +545,13 @@ bool ServerConnection::use_ssl(const String& key, const String& cert, const Stri } EVP_PKEY_free(pkey); - RSA* rsa = RSA_generate_key(512, RSA_F4, NULL, NULL); - SSL_CTX_set_tmp_rsa(ssl_context_, rsa); - RSA_free(rsa); - - SSL_CTX_set_verify(ssl_context_, SSL_VERIFY_NONE, 0); + DH* dh = dh_parameters(); + if (!dh || !SSL_CTX_set_tmp_dh(ssl_context_, dh)) { + print_ssl_error(); + DH_free(dh); + return false; + } + DH_free(dh); return true; } @@ -513,7 +642,8 @@ void ServerConnection::internal_listen() { inc_ref(); // For the TCP handle - rc = tcp_.bind(address_.addr()); + Address::SocketStorage storage; + rc = tcp_.bind(address_.to_sockaddr(&storage)); if (rc != 0) { fprintf(stderr, "Unable to bind address %s\n", address_.to_string(true).c_str()); uv_close(tcp_.as_handle(), on_close); @@ -777,7 +907,7 @@ const char* decode_query_params_v1(const char* input, const char* end, bool is_e } const char* decode_query_params_v2(const char* input, 
const char* end, QueryParameters* params) { - int8_t flags; + int8_t flags = 0; const char* pos = input; pos = decode_uint16(pos, end, ¶ms->consistency); pos = decode_int8(pos, end, &flags); @@ -798,7 +928,7 @@ const char* decode_query_params_v2(const char* input, const char* end, QueryPara } const char* decode_query_params_v3v4(const char* input, const char* end, QueryParameters* params) { - int8_t flags; + int8_t flags = 0; const char* pos = input; pos = decode_uint16(pos, end, ¶ms->consistency); pos = decode_int8(pos, end, &flags); @@ -824,7 +954,7 @@ const char* decode_query_params_v3v4(const char* input, const char* end, QueryPa } const char* decode_query_params_v5(const char* input, const char* end, QueryParameters* params) { - int32_t flags; + int32_t flags = 0; const char* pos = input; pos = decode_uint16(pos, end, ¶ms->consistency); pos = decode_int32(pos, end, &flags); @@ -967,6 +1097,15 @@ inline int32_t encode_uuid(CassUuid uuid, String* output) { return 16; } +int32_t encode_string_map(const Map >& value, String* output) { + int32_t size = encode_uint16(value.size(), output); + for (Map >::const_iterator it = value.begin(); it != value.end(); ++it) { + size += encode_string(it->first, output); + size += encode_string_list(it->second, output); + } + return size; +} + static String encode_header(int8_t version, int8_t flags, int16_t stream, int8_t opcode, int32_t len) { String header; @@ -1553,8 +1692,8 @@ void SystemPeers::on_run(Request* request) const { } String ip = query.substr(pos, end_pos - pos); - Address address; - if (!Address::from_string(ip, request->address().port(), &address)) { + Address address(ip, request->address().port()); + if (!address.is_valid_and_resolved()) { request->error(ERROR_INVALID_QUERY, "Invalid inet address in WHERE clause"); return; } @@ -1622,8 +1761,8 @@ void SystemPeersDse::on_run(Request* request) const { } String ip = query.substr(pos, end_pos - pos); - Address address; - if (!Address::from_string(ip, request->address().port(), &address)) { + Address address(ip, request->address().port()); + if (!address.is_valid_and_resolved()) { request->error(ERROR_INVALID_QUERY, "Invalid inet address in WHERE clause"); return; } @@ -1872,7 +2011,17 @@ int32_t ProtocolHandler::decode_frame(ClientConnection* client, const char* fram } else { return len - remaining; } - state_ = BODY; + + if (length_ == 0) { + decode_body(client, pos, 0); + version_ = 0; + flags_ = 0; + opcode_ = 0; + length_ = 0; + state_ = PROTOCOL_VERSION; + } else { + state_ = BODY; + } break; case BODY: if (remaining >= length_) { @@ -2234,9 +2383,10 @@ Host::Host(const Address& address, const String& dc, const String& rack, MT19937 } } -SimpleEventLoopGroup::SimpleEventLoopGroup(size_t num_threads) +SimpleEventLoopGroup::SimpleEventLoopGroup(size_t num_threads, + const String& thread_name /*= "mockssandra"*/) : RoundRobinEventLoopGroup(num_threads) { - int rc = init("mockssandra"); + int rc = init(thread_name); UNUSED_(rc); assert(rc == 0 && "Unable to initialize simple event loop"); run(); diff --git a/gtests/src/unit/mockssandra.hpp b/gtests/src/unit/mockssandra.hpp index 2e4d27e80..eb011ce4d 100644 --- a/gtests/src/unit/mockssandra.hpp +++ b/gtests/src/unit/mockssandra.hpp @@ -28,6 +28,7 @@ #include "address.hpp" #include "event_loop.hpp" #include "list.hpp" +#include "map.hpp" #include "ref_counted.hpp" #include "scoped_ptr.hpp" #include "string.hpp" @@ -46,6 +47,7 @@ using datastax::String; using datastax::internal::Atomic; using datastax::internal::List; +using 
datastax::internal::Map; using datastax::internal::RefCounted; using datastax::internal::ScopedPtr; using datastax::internal::SharedRefPtr; @@ -62,7 +64,8 @@ namespace mockssandra { class Ssl { public: static String generate_key(); - static String generate_cert(const String& key, String cn = ""); + static String generate_cert(const String& key, String cn = "", String ca_cert = "", + String ca_key = ""); }; namespace internal { @@ -103,6 +106,8 @@ class ClientConnection { protected: int accept(); + const char* sni_server_name() const; + private: static void on_close(uv_handle_t* handle); void handle_close(); @@ -144,6 +149,7 @@ class ClientConnection { class ClientConnectionFactory { public: virtual ClientConnection* create(ServerConnection* server) const = 0; + virtual ~ClientConnectionFactory() {} }; class ServerConnectionTask : public RefCounted { @@ -168,7 +174,8 @@ class ServerConnection : public RefCounted { SSL_CTX* ssl_context() { return ssl_context_; } const ClientConnections& clients() const { return clients_; } - bool use_ssl(const String& key, const String& cert, const String& password = ""); + bool use_ssl(const String& key, const String& cert, const String& ca_cert = "", + bool require_client_cert = false); void listen(EventLoopGroup* event_loop_group); int wait_listen(); @@ -355,6 +362,8 @@ struct QueryParameters { String keyspace; }; +int32_t encode_string_map(const Map >& value, String* output); + class Type { public: static Type text(); @@ -1204,7 +1213,7 @@ class Cluster { class SimpleEventLoopGroup : public RoundRobinEventLoopGroup { public: - SimpleEventLoopGroup(size_t num_threads = 1); + SimpleEventLoopGroup(size_t num_threads = 1, const String& thread_name = "mockssandra"); ~SimpleEventLoopGroup(); }; @@ -1246,53 +1255,39 @@ class SimpleCluster : public Cluster { class SimpleEchoServer { public: - SimpleEchoServer(const Address& address = Address("127.0.0.1", 8888)) - : event_loop_group_(1) - , server_(new internal::ServerConnection(address, factory_)) {} + SimpleEchoServer() + : factory_(new EchoClientConnectionFactory()) + , event_loop_group_(1) {} ~SimpleEchoServer() { close(); } void close() { - server_->close(); - server_->wait_close(); + if (server_) { + server_->close(); + server_->wait_close(); + } } String use_ssl(const String& cn = "") { - String key(Ssl::generate_key()); - String cert(Ssl::generate_cert(key, cn)); - if (!server_->use_ssl(key, cert)) { - return ""; - } - return cert; + ssl_key_ = Ssl::generate_key(); + ssl_cert_ = Ssl::generate_cert(ssl_key_, cn); + return ssl_cert_; } - void use_close_immediately() { factory_.use_close_immediately(); } + void use_connection_factory(internal::ClientConnectionFactory* factory) { + factory_.reset(factory); + } - int listen() { + int listen(const Address& address = Address("127.0.0.1", 8888)) { + server_.reset(new internal::ServerConnection(address, *factory_)); + if (!ssl_key_.empty() && !ssl_cert_.empty() && !server_->use_ssl(ssl_key_, ssl_cert_)) { + return -1; + } server_->listen(&event_loop_group_); return server_->wait_listen(); } - void reset(const Address& address) { - server_.reset(new internal::ServerConnection(address, factory_)); - } - private: - class CloseConnection : public internal::ClientConnection { - public: - CloseConnection(internal::ServerConnection* server) - : internal::ClientConnection(server) {} - - virtual int on_accept() { - int rc = accept(); - if (rc != 0) { - return rc; - } - close(); - return rc; - } - }; - class EchoConnection : public internal::ClientConnection { public: 
EchoConnection(internal::ServerConnection* server) @@ -1301,29 +1296,19 @@ class SimpleEchoServer { virtual void on_read(const char* data, size_t len) { write(data, len); } }; - class ClientConnectionFactory : public internal::ClientConnectionFactory { + class EchoClientConnectionFactory : public internal::ClientConnectionFactory { public: - ClientConnectionFactory() - : close_immediately_(false) {} - - void use_close_immediately() { close_immediately_ = true; } - virtual internal::ClientConnection* create(internal::ServerConnection* server) const { - if (close_immediately_) { - return new CloseConnection(server); - } else { - return new EchoConnection(server); - } + return new EchoConnection(server); } - - private: - bool close_immediately_; }; private: - ClientConnectionFactory factory_; + ScopedPtr factory_; SimpleEventLoopGroup event_loop_group_; internal::ServerConnection::Ptr server_; + String ssl_key_; + String ssl_cert_; }; } // namespace mockssandra diff --git a/gtests/src/unit/tests/test_address.cpp b/gtests/src/unit/tests/test_address.cpp index c318445c5..cae21eb25 100644 --- a/gtests/src/unit/tests/test_address.cpp +++ b/gtests/src/unit/tests/test_address.cpp @@ -19,15 +19,181 @@ #include "address.hpp" using datastax::internal::core::Address; +using datastax::internal::core::AddressSet; + +TEST(AddressUnitTest, FromString) { + EXPECT_TRUE(Address("127.0.0.1", 9042).is_resolved()); + EXPECT_TRUE(Address("0.0.0.0", 9042).is_resolved()); + EXPECT_TRUE(Address("::", 9042).is_resolved()); + EXPECT_TRUE(Address("::1", 9042).is_resolved()); + EXPECT_TRUE(Address("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 9042).is_resolved()); + + EXPECT_FALSE(Address().is_resolved()); + EXPECT_FALSE(Address("localhost", 9042).is_resolved()); + EXPECT_FALSE(Address("datastax.com", 9042).is_resolved()); +} TEST(AddressUnitTest, CompareIPv4) { - EXPECT_GT(Address("255.255.255.255", 9042).compare(Address("0.0.0.0", 9042)), 0); - EXPECT_LT(Address("0.0.0.0", 9042).compare(Address("255.255.255.255", 9042)), 0); - EXPECT_EQ(Address("1.2.3.4", 9042).compare(Address("1.2.3.4", 9042)), 0); + EXPECT_LT(Address("0.0.0.0", 9042), Address("255.255.255.255", 9042)); + EXPECT_EQ(Address("1.2.3.4", 9042), Address("1.2.3.4", 9042)); + EXPECT_NE(Address("1.2.3.4", 9042), Address("5.6.7.8", 9042)); + + EXPECT_LT(Address("0.0.0.0", 9041), Address("0.0.0.0", 9042)); + EXPECT_NE(Address("0.0.0.0", 9041), Address("0.0.0.0", 9042)); + + // Without comparing port + EXPECT_TRUE(Address("0.0.0.0", 9041).equals(Address("0.0.0.0", 9042), false)); + EXPECT_FALSE(Address("127.0.0.1", 9042).equals(Address("0.0.0.0", 9042), false)); } TEST(AddressUnitTest, CompareIPv6) { - EXPECT_GT(Address("0.0.0.0", 1).compare(Address("0.0.0.0", 0), true), 0); - EXPECT_LT(Address("0.0.0.0", 0).compare(Address("0.0.0.0", 1), true), 0); - EXPECT_EQ(Address("0.0.0.0", 0).compare(Address("0.0.0.0", 1), false), 0); + EXPECT_LT(Address("0:0:0:0:0:0:0:0", 9042), Address("0:0:0:0:0:0:0:FFFF", 9042)); + EXPECT_EQ(Address("0:0:0:0:0:0:0:1234", 9042), Address("0:0:0:0:0:0:0:1234", 9042)); + EXPECT_NE(Address("0:0:0:0:0:0:0:1234", 9042), Address("0:0:0:0:0:0:0:5678", 9042)); + + EXPECT_LT(Address("0:0:0:0:0:0:0:0", 9041), Address("0:0:0:0:0:0:0:0", 9042)); + EXPECT_NE(Address("0:0:0:0:0:0:0:0", 9041), Address("0:0:0:0:0:0:0:0", 9042)); + + // Without comparing port + EXPECT_TRUE(Address("::", 9041).equals(Address("::", 9042), false)); + EXPECT_FALSE(Address("::1", 9042).equals(Address("::", 9042), false)); + + EXPECT_EQ(Address("0:0:0:0:0:0:0:0", 9042), 
Address("::", 9042)); // Normalization +} + +TEST(AddressUnitTest, ToSockAddrIPv4) { + Address expected("127.0.0.1", 9042); + Address::SocketStorage storage; + Address actual(expected.to_sockaddr(&storage)); + EXPECT_EQ(expected, actual); +} + +TEST(AddressUnitTest, ToSockAddrIPv6) { + Address expected("::1", 9042); + Address::SocketStorage storage; + Address actual(expected.to_sockaddr(&storage)); + EXPECT_EQ(expected, actual); +} + +TEST(AddressUnitTest, ToInetIPv4) { + Address expected("127.0.0.1", 9042); + + uint8_t inet_address[4]; + uint8_t inet_address_length = expected.to_inet(inet_address); + EXPECT_EQ(inet_address_length, 4u); + + Address actual(inet_address, inet_address_length, 9042); + EXPECT_EQ(expected, actual); +} + +TEST(AddressUnitTest, ToInetIPv6) { + Address expected("::1", 9042); + + uint8_t inet_address[16]; + uint8_t inet_address_length = expected.to_inet(inet_address); + EXPECT_EQ(inet_address_length, 16u); + + Address actual(inet_address, inet_address_length, 9042); + EXPECT_EQ(expected, actual); +} + +TEST(AddressUnitTest, ToString) { + // Only hostname/address + EXPECT_EQ(Address("127.0.0.1", 9042).hostname_or_address(), "127.0.0.1"); + EXPECT_EQ(Address("::1", 9042).hostname_or_address(), "::1"); + EXPECT_EQ(Address("0:0:0:0:0:0:0:1", 9042).hostname_or_address(), "::1"); // IPv6 normalization + EXPECT_EQ(Address("0:0:0:0:0:0:0:0", 9042).hostname_or_address(), "::"); // IPv6 normalization + EXPECT_EQ(Address("datastax.com", 9042).hostname_or_address(), "datastax.com"); + + // w/o port + EXPECT_EQ(Address("127.0.0.1", 9042).to_string(), "127.0.0.1"); + EXPECT_EQ(Address("::1", 9042).to_string(), "::1"); + EXPECT_EQ(Address("datastax.com", 9042).to_string(), "datastax.com"); + + // w/ port + EXPECT_EQ(Address("127.0.0.1", 9042).to_string(true), "127.0.0.1:9042"); + EXPECT_EQ(Address("::1", 9042).to_string(true), "[::1]:9042"); + EXPECT_EQ(Address("datastax.com", 9042).to_string(true), "datastax.com:9042"); + + // w/ servername + EXPECT_EQ(Address("127.0.0.1", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(), + "127.0.0.1 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)"); + EXPECT_EQ(Address("::1", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(), + "::1 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)"); + EXPECT_EQ(Address("datastax.com", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(), + "datastax.com (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)"); + + // w/ servername and port + EXPECT_EQ(Address("127.0.0.1", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(true), + "127.0.0.1:9042 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)"); + EXPECT_EQ(Address("::1", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(true), + "[::1]:9042 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)"); + EXPECT_EQ(Address("datastax.com", 9042, "d1f1884b-6e05-4b3f-9e88-8a93904bb0e5").to_string(true), + "datastax.com:9042 (d1f1884b-6e05-4b3f-9e88-8a93904bb0e5)"); +} + +TEST(AddressUnitTest, Hash) { + AddressSet set; + + EXPECT_EQ(set.size(), 0u); // Empty + + set.insert(Address("0.0.0.0", 9042)); + EXPECT_EQ(set.size(), 1u); // Added + + // Reinsert + set.insert(Address("0.0.0.0", 9042)); + EXPECT_EQ(set.size(), 1u); // No change + + // Remove + set.erase(Address("0.0.0.0", 9042)); + EXPECT_EQ(set.size(), 0u); // Removed + + // Multiple + set.insert(Address("0.0.0.0", 9042)); + set.insert(Address("127.0.0.1", 9042)); + set.insert(Address("localhost", 9042)); + set.insert(Address("::1", 9042)); + EXPECT_EQ(set.size(), 4u); // Added + EXPECT_EQ(set.count(Address("0.0.0.0", 9042)), 1u); + 
EXPECT_EQ(set.count(Address("127.0.0.1", 9042)), 1u); + EXPECT_EQ(set.count(Address("localhost", 9042)), 1u); + EXPECT_EQ(set.count(Address("::1", 9042)), 1u); + + // Different port + set.insert(Address("0.0.0.0", 9041)); + EXPECT_EQ(set.size(), 5u); // Added +} + +TEST(AddressUnitTest, StrictWeakOrder) { + { // Family + Address a("localhost", 9042); + Address b("127.0.0.1", 30002, "a"); + ASSERT_NE(a, b); + ASSERT_TRUE(a < b); + ASSERT_FALSE(b < a); + } + + { // Port + Address a("localhost", 9042, "b"); + Address b("localhost", 30002, "a"); + ASSERT_NE(a, b); + ASSERT_TRUE(a < b); + ASSERT_FALSE(b < a); + } + + { // Server name + Address a("127.0.0.2", 9042, "a"); + Address b("127.0.0.1", 9042, "b"); + ASSERT_NE(a, b); + ASSERT_TRUE(a < b); + ASSERT_FALSE(b < a); + } + + { // Hostname or address + Address a("127.0.0.1", 9042, "a"); + Address b("127.0.0.2", 9042, "a"); + ASSERT_NE(a, b); + ASSERT_TRUE(a < b); + ASSERT_FALSE(b < a); + } } diff --git a/gtests/src/unit/tests/test_cloud_secure_connect_config.cpp b/gtests/src/unit/tests/test_cloud_secure_connect_config.cpp new file mode 100644 index 000000000..60bff6733 --- /dev/null +++ b/gtests/src/unit/tests/test_cloud_secure_connect_config.cpp @@ -0,0 +1,687 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include "driver_config.hpp" + +#ifdef HAVE_ZLIB +#include "http_test.hpp" + +#include "cloud_secure_connection_config.hpp" +#include "cluster_config.hpp" +#include "cluster_connector.hpp" +#include "cluster_metadata_resolver.hpp" +#include "config.hpp" +#include "http_client.hpp" +#include "json.hpp" +#include "string.hpp" + +#include "zip.h" + +#include +#include + +#define CONFIGURATION_FILE "config.json" +#define CERTIFICATE_AUTHORITY_FILE "ca.crt" +#define CERTIFICATE_FILE "cert" +#define KEY_FILE "key" + +#define CREDS_V1_ZIP_FILE "creds-v1.zip" + +#ifdef _WIN32 +#define PATH_SEPARATOR '\\' +#else +#define PATH_SEPARATOR '/' +#endif + +#define SNI_LOCAL_DC "dc1" +#define SNI_HOST HTTP_MOCK_HOSTNAME +#define SNI_PORT 30002 +#define SNI_HOST_AND_PORT HTTP_MOCK_HOSTNAME ":30002" +#define SNI_HOST_ID_1 "276b1694-64c4-4ba8-afb4-e33915a02f1e" +#define SNI_HOST_ID_2 "8c29f723-5c1c-4ffd-a4ef-8c683a7fc02b" +#define SNI_HOST_ID_3 "fb91d3ff-47cb-447d-b31d-c5721ca8d7ab" +#define METADATA_SERVICE_PORT 30443 + +using datastax::String; +using datastax::internal::core::AddressVec; +using datastax::internal::core::CloudSecureConnectionConfig; +using datastax::internal::core::ClusterConfig; +using datastax::internal::core::ClusterMetadataResolver; +using datastax::internal::core::ClusterSettings; +using datastax::internal::core::Config; +using datastax::internal::core::HttpClient; +using datastax::internal::core::SslContext; +using datastax::internal::core::SslContextFactory; +using datastax::internal::enterprise::DsePlainTextAuthProvider; +using datastax::internal::json::StringBuffer; +using datastax::internal::json::Writer; + +using mockssandra::Ssl; + +class CloudSecureConnectionConfigTest : public HttpTest { +public: + const String& ca_cert() const { return ca_cert_; } + void set_invalid_ca_cert() { ca_cert_ = "!!!!!INVALID!!!!!"; } + const String& ca_key() const { return ca_key_; } + const String& cert() const { return cert_; } + void set_invalid_cert() { cert_ = "!!!!!INVALID!!!!!"; } + const String& key() const { return key_; } + void set_invalid_key() { key_ = "!!!!!INVALID!!!!!"; } + + void SetUp() { + HttpTest::SetUp(); + + char tmp[260] = { 0 }; // Note: 260 is the maximum path on Windows + size_t tmp_length = 260; + uv_os_tmpdir(tmp, &tmp_length); + + tmp_zip_file_ = String(tmp, tmp_length) + PATH_SEPARATOR + CREDS_V1_ZIP_FILE; + + ca_key_ = Ssl::generate_key(); + ca_cert_ = Ssl::generate_cert(ca_key_, "CA"); + key_ = Ssl::generate_key(); + cert_ = Ssl::generate_cert(key_, "", ca_cert_, ca_key_); + } + + const String& creds_zip_file() const { return tmp_zip_file_; } + + void create_zip_file(const String& config, bool is_configuration = true, bool is_ca = true, + bool is_cert = true, bool is_key = true) { + zipFile zip_file = zipOpen64(tmp_zip_file_.c_str(), 0); + + if (is_configuration && add_zip_file_entry(zip_file, CONFIGURATION_FILE)) { + zipWriteInFileInZip(zip_file, config.c_str(), config.length()); + zipCloseFileInZip(zip_file); + } + if (is_ca && add_zip_file_entry(zip_file, CERTIFICATE_AUTHORITY_FILE)) { + zipWriteInFileInZip(zip_file, ca_cert_.c_str(), ca_cert_.length()); + zipCloseFileInZip(zip_file); + } + if (is_cert && add_zip_file_entry(zip_file, CERTIFICATE_FILE)) { + zipWriteInFileInZip(zip_file, cert_.c_str(), cert_.length()); + zipCloseFileInZip(zip_file); + } + if (is_key && add_zip_file_entry(zip_file, KEY_FILE)) { + zipWriteInFileInZip(zip_file, key_.c_str(), key_.length()); + zipCloseFileInZip(zip_file); + } + + zipClose(zip_file, NULL); + } + + static void 
full_config_credsv1(StringBuffer& buffer, String host = "cloud.datastax.com", + int port = 1443) { + Writer writer(buffer); + writer.StartObject(); + writer.Key("username"); + writer.String("DataStax"); + writer.Key("password"); + writer.String("Constellation"); + writer.Key("host"); + writer.String(host.c_str()); + writer.Key("port"); + writer.Int(port); + writer.EndObject(); + } + +private: + bool add_zip_file_entry(zipFile zip_file, const String& zip_filename) { + zip_fileinfo file_info; + memset(&file_info, 0, sizeof(file_info)); + time_t tmp; + time(&tmp); + struct tm* time_info = localtime(&tmp); + file_info.tmz_date.tm_sec = time_info->tm_sec; + file_info.tmz_date.tm_min = time_info->tm_min; + file_info.tmz_date.tm_hour = time_info->tm_hour; + file_info.tmz_date.tm_mday = time_info->tm_mday; + file_info.tmz_date.tm_mon = time_info->tm_mon; + file_info.tmz_date.tm_year = time_info->tm_year; + + int rc = zipOpenNewFileInZip(zip_file, zip_filename.c_str(), &file_info, NULL, 0, NULL, 0, NULL, + Z_DEFLATED, Z_DEFAULT_COMPRESSION); + return rc == ZIP_OK; + } + +private: + String tmp_zip_file_; + String ca_cert_; + String ca_key_; + String cert_; + String key_; +}; + +TEST_F(CloudSecureConnectionConfigTest, CredsV1) { + Config config; + CloudSecureConnectionConfig cloud_config; + + StringBuffer buffer; + full_config_credsv1(buffer); + create_zip_file(buffer.GetString()); + + EXPECT_TRUE(cloud_config.load(creds_zip_file(), &config)); + EXPECT_EQ("DataStax", cloud_config.username()); + EXPECT_EQ("Constellation", cloud_config.password()); + EXPECT_EQ("cloud.datastax.com", cloud_config.host()); + EXPECT_EQ(1443, cloud_config.port()); + EXPECT_EQ(ca_cert(), cloud_config.ca_cert()); + EXPECT_EQ(cert(), cloud_config.cert()); + EXPECT_EQ(key(), cloud_config.key()); + + EXPECT_TRUE(config.ssl_context()); + EXPECT_TRUE(dynamic_cast(config.auth_provider().get()) != NULL); +} + +TEST_F(CloudSecureConnectionConfigTest, CredsV1WithoutCreds) { + Config config; + CloudSecureConnectionConfig cloud_config; + + StringBuffer buffer; + Writer writer(buffer); + writer.StartObject(); + writer.Key("host"); + writer.String("bigdata.datastax.com"); + writer.Key("port"); + writer.Int(2443); + writer.EndObject(); + create_zip_file(buffer.GetString()); + + EXPECT_TRUE(cloud_config.load(creds_zip_file(), &config)); + EXPECT_EQ("", cloud_config.username()); + EXPECT_EQ("", cloud_config.password()); + EXPECT_EQ("bigdata.datastax.com", cloud_config.host()); + EXPECT_EQ(2443, cloud_config.port()); + EXPECT_EQ(ca_cert(), cloud_config.ca_cert()); + EXPECT_EQ(cert(), cloud_config.cert()); + EXPECT_EQ(key(), cloud_config.key()); + + EXPECT_TRUE(config.ssl_context()); + EXPECT_TRUE(dynamic_cast(config.auth_provider().get()) == + NULL); // Not configured +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1ConfigMissingHost) { + CloudSecureConnectionConfig config; + + StringBuffer buffer; + Writer writer(buffer); + writer.StartObject(); + writer.Key("username"); + writer.String("DataStax"); + writer.Key("password"); + writer.String("Constellation"); + writer.Key("port"); + writer.Int(1443); + writer.EndObject(); + create_zip_file(buffer.GetString()); + + EXPECT_FALSE(config.load(creds_zip_file())); +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1ConfigMissingPort) { + CloudSecureConnectionConfig config; + + StringBuffer buffer; + Writer writer(buffer); + writer.StartObject(); + writer.Key("username"); + writer.String("DataStax"); + writer.Key("password"); + writer.String("Constellation"); + writer.Key("host"); 
+ writer.String("cloud.datastax.com"); + writer.EndObject(); + create_zip_file(buffer.GetString()); + + EXPECT_FALSE(config.load(creds_zip_file())); +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsMissingZipFile) { + CloudSecureConnectionConfig config; + + EXPECT_FALSE(config.load("invalid.zip")); +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1MissingConfigJson) { + CloudSecureConnectionConfig config; + + create_zip_file("", false); + EXPECT_FALSE(config.load(creds_zip_file())); +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1MissingCA) { + CloudSecureConnectionConfig config; + + StringBuffer buffer; + full_config_credsv1(buffer); + create_zip_file(buffer.GetString(), true, false); + EXPECT_FALSE(config.load(creds_zip_file())); +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1MissingCert) { + CloudSecureConnectionConfig config; + + StringBuffer buffer; + full_config_credsv1(buffer); + create_zip_file(buffer.GetString(), true, true, false); + EXPECT_FALSE(config.load(creds_zip_file())); +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1MissingKey) { + CloudSecureConnectionConfig config; + + StringBuffer buffer; + full_config_credsv1(buffer); + create_zip_file(buffer.GetString(), true, true, false); + create_zip_file(buffer.GetString(), true, true, true, false); + EXPECT_FALSE(config.load(creds_zip_file())); +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1SslCaCert) { + Config config; + CloudSecureConnectionConfig cloud_config; + + StringBuffer buffer; + full_config_credsv1(buffer); + set_invalid_ca_cert(); + create_zip_file(buffer.GetString()); + + EXPECT_FALSE(cloud_config.load(creds_zip_file(), &config)); + EXPECT_FALSE(config.ssl_context()); +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1SslCert) { + Config config; + CloudSecureConnectionConfig cloud_config; + + StringBuffer buffer; + full_config_credsv1(buffer); + set_invalid_cert(); + create_zip_file(buffer.GetString()); + + EXPECT_FALSE(cloud_config.load(creds_zip_file(), &config)); + EXPECT_FALSE(config.ssl_context()); +} + +TEST_F(CloudSecureConnectionConfigTest, InvalidCredsV1SslKey) { + Config config; + CloudSecureConnectionConfig cloud_config; + + StringBuffer buffer; + full_config_credsv1(buffer); + set_invalid_key(); + create_zip_file(buffer.GetString()); + + EXPECT_FALSE(cloud_config.load(creds_zip_file(), &config)); + EXPECT_FALSE(config.ssl_context()); +} + +class CloudMetadataServerTest : public CloudSecureConnectionConfigTest { +public: + void SetUp() { + CloudSecureConnectionConfigTest::SetUp(); + + StringBuffer buffer; + full_config_credsv1(buffer, HTTP_MOCK_HOSTNAME, HTTP_MOCK_SERVER_PORT); + create_zip_file(buffer.GetString()); + cloud_config_.load(creds_zip_file(), &config_); + + use_ssl(ca_cert(), ca_key(), HTTP_MOCK_HOSTNAME); // Ensure HttpServer is configured to use SSL + + ClusterSettings settings(config_); + resolver_ = config_.cluster_metadata_resolver_factory()->new_instance(settings); + } + + void start_http_server(bool is_content_type = true, bool is_contact_info = true, + bool is_local_dc = true, bool is_contact_points = true, + bool is_sni_proxy_address = true, bool is_port = true) { + set_path("/metadata"); + + StringBuffer buffer; + response_v1(buffer, is_contact_info, is_local_dc, is_contact_points, is_sni_proxy_address, + is_port); + set_response_body(buffer.GetString()); + + set_content_type(is_content_type ? 
response_v1_content_type() : "invalid"); + + HttpTest::start_http_server(); + } + + const ClusterMetadataResolver::Ptr& resolver() const { return resolver_; } + + static void on_resolve_success(ClusterMetadataResolver* resolver, bool* flag) { + *flag = true; + EXPECT_EQ("dc1", resolver->local_dc()); + + const AddressVec& contact_points = resolver->resolved_contact_points(); + ASSERT_EQ(3u, contact_points.size()); + EXPECT_EQ(Address(SNI_HOST, SNI_PORT, SNI_HOST_ID_1), contact_points[0]); + EXPECT_EQ(Address(SNI_HOST, SNI_PORT, SNI_HOST_ID_2), contact_points[1]); + EXPECT_EQ(Address(SNI_HOST, SNI_PORT, SNI_HOST_ID_3), contact_points[2]); + } + + static void on_resolve_success_default_port(ClusterMetadataResolver* resolver, bool* flag) { + *flag = true; + EXPECT_EQ("dc1", resolver->local_dc()); + + const AddressVec& contact_points = resolver->resolved_contact_points(); + ASSERT_EQ(3u, contact_points.size()); + EXPECT_EQ(Address(SNI_HOST, METADATA_SERVICE_PORT, SNI_HOST_ID_1), contact_points[0]); + EXPECT_EQ(Address(SNI_HOST, METADATA_SERVICE_PORT, SNI_HOST_ID_2), contact_points[1]); + EXPECT_EQ(Address(SNI_HOST, METADATA_SERVICE_PORT, SNI_HOST_ID_3), contact_points[2]); + } + + static void on_resolve_failed(ClusterMetadataResolver* resolver, bool* flag) { + *flag = true; + EXPECT_EQ(0u, resolver->resolved_contact_points().size()); + } + + static void on_resolve_local_dc_failed(ClusterMetadataResolver* resolver, bool* flag) { + *flag = true; + EXPECT_EQ("", resolver->local_dc()); + EXPECT_EQ(0u, resolver->resolved_contact_points().size()); + } + +private: + static void response_v1(StringBuffer& buffer, bool is_contact_info = true, + bool is_local_dc = true, bool is_contact_points = true, + bool is_sni_proxy_address = true, bool is_port = true) { + Writer writer(buffer); + writer.StartObject(); + writer.Key("version"); + writer.Int(1); + writer.Key("region"); + writer.String("local"); + if (is_contact_info) { + writer.Key("contact_info"); + writer.StartObject(); + writer.Key("type"); + writer.String("sni_proxy"); + if (is_local_dc) { + writer.Key("local_dc"); + writer.String(SNI_LOCAL_DC); + } + if (is_contact_points) { + writer.Key("contact_points"); + writer.StartArray(); + writer.String(SNI_HOST_ID_1); + writer.String(SNI_HOST_ID_2); + writer.String(SNI_HOST_ID_3); + writer.EndArray(); + } + if (is_sni_proxy_address) { + writer.Key("sni_proxy_address"); + if (is_port) { + writer.String(SNI_HOST_AND_PORT); + } else { + writer.String(SNI_HOST); + } + } + writer.EndObject(); + } + writer.EndObject(); + } + + static const char* response_v1_content_type() { return "application/json"; } + +private: + Config config_; + CloudSecureConnectionConfig cloud_config_; + ClusterMetadataResolver::Ptr resolver_; +}; + +TEST_F(CloudMetadataServerTest, ResolveV1StandardSsl) { + start_http_server(); + + bool is_resolved = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_success, &is_resolved)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolved); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, ResolveV1DefaultPortSsl) { + start_http_server(true, true, true, true, true, false); + + bool is_resolved = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, + bind_callback(on_resolve_success_default_port, &is_resolved)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolved); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, InvalidMetadataServer) { + bool is_resolved = false; + AddressVec 
contact_points; + resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolved); +} + +TEST_F(CloudMetadataServerTest, ResolveV1InvalidContentTypeSsl) { + start_http_server(false); + + bool is_resolved = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolved); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, ResolveV1MissingContactInfoSsl) { + start_http_server(true, false); + + bool is_resolved = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolved); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, ResolveV1MissingLocalDcSsl) { + start_http_server(true, true, false); + + bool is_resolved = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, + bind_callback(on_resolve_local_dc_failed, &is_resolved)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolved); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, ResolveV1MissingContactPointsSsl) { + start_http_server(true, true, true, false); + + bool is_resolved = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolved); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, ResolveV1MissingSniProxyAddressSsl) { + start_http_server(true, true, true, true, false); + + bool is_resolved = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolved)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolved); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, ResolveInvalidJsonResponse) { + add_logging_critera("Unable to configure driver from metadata server: Metadata JSON is invalid"); + + set_path("/metadata"); + set_response_body("[]"); + set_content_type("application/json"); + HttpTest::start_http_server(); + + bool is_resolve_failed = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolve_failed)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolve_failed); + EXPECT_EQ(logging_criteria_count(), 1); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, ResolveErrorResponse) { + add_logging_critera("Unable to configure driver from metadata server: Returned error response " + "code 400: 'Invalid version'"); + + const char* response_body = "{" + "\"code\": 400," + "\"message\": \"Invalid version\"" + "}"; + + set_path("/metadata"); + set_response_body(response_body); + set_response_status_code(400); + set_content_type("application/json"); + HttpTest::start_http_server(); + + bool is_resolve_failed = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolve_failed)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolve_failed); + EXPECT_EQ(logging_criteria_count(), 1); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, ResolveInvalidJsonErrorResponse) { + add_logging_critera("Unable to configure driver from metadata server: Returned error response " + "code 400: '[]'"); + + set_path("/metadata"); + set_response_body("[]"); + 
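+  // "[]" parses as JSON but is not the expected {"code", "message"} error object, so the raw
+  // body is surfaced verbatim in the log message asserted above.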
set_response_status_code(400); + set_content_type("application/json"); + HttpTest::start_http_server(); + + bool is_resolve_failed = false; + AddressVec contact_points; + resolver()->resolve(loop(), contact_points, bind_callback(on_resolve_failed, &is_resolve_failed)); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_resolve_failed); + EXPECT_EQ(logging_criteria_count(), 1); + + stop_http_server(); +} + +TEST_F(CloudMetadataServerTest, CloudConfiguredInvalidContactPointsOverride) { + StringBuffer buffer; + full_config_credsv1(buffer); + create_zip_file(buffer.GetString()); + + ClusterConfig cluster_config; + CassCluster* cluster = CassCluster::to(&cluster_config); + EXPECT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster, creds_zip_file().c_str())); + add_logging_critera("Contact points cannot be overridden with cloud secure connection bundle"); + EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, + cass_cluster_set_contact_points(cluster, "some.contact.point")); + EXPECT_EQ(logging_criteria_count(), 1); +} + +TEST_F(CloudMetadataServerTest, CloudConfiguredInvalidSslContextOverride) { + StringBuffer buffer; + full_config_credsv1(buffer); + create_zip_file(buffer.GetString()); + + ClusterConfig cluster_config; + CassCluster* cluster = CassCluster::to(&cluster_config); + SslContext::Ptr ssl_context(SslContextFactory::create()); + CassSsl* ssl = CassSsl::to(ssl_context.get()); + + EXPECT_EQ(CASS_OK, cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster, creds_zip_file().c_str())); + add_logging_critera("SSL context cannot be overridden with cloud secure connection bundle"); + cass_cluster_set_ssl(cluster, ssl); + EXPECT_EQ(logging_criteria_count(), 1); +} + +TEST_F(CloudMetadataServerTest, CloudConfiguredFailureContactPointsExist) { + StringBuffer buffer; + full_config_credsv1(buffer); + create_zip_file(buffer.GetString()); + + ClusterConfig cluster_config; + CassCluster* cluster = CassCluster::to(&cluster_config); + EXPECT_EQ(CASS_OK, cass_cluster_set_contact_points(cluster, "some.contact.point")); + add_logging_critera("Contact points must not be specified with cloud secure connection bundle"); + EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, + cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster, creds_zip_file().c_str())); + EXPECT_EQ(logging_criteria_count(), 1); +} + +TEST_F(CloudMetadataServerTest, CloudConfiguredFailureSslContextExist) { + StringBuffer buffer; + full_config_credsv1(buffer); + create_zip_file(buffer.GetString()); + + ClusterConfig cluster_config; + CassCluster* cluster = CassCluster::to(&cluster_config); + SslContext::Ptr ssl_context(SslContextFactory::create()); + CassSsl* ssl = CassSsl::to(ssl_context.get()); + + cass_cluster_set_ssl(cluster, ssl); + add_logging_critera("SSL context must not be specified with cloud secure connection bundle"); + EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, + cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster, creds_zip_file().c_str())); + EXPECT_EQ(logging_criteria_count(), 1); +} + +TEST_F(CloudMetadataServerTest, CloudConfiguredFailureContactPointsAndSslContextExist) { + StringBuffer buffer; + full_config_credsv1(buffer); + create_zip_file(buffer.GetString()); + + ClusterConfig cluster_config; + CassCluster* cluster = CassCluster::to(&cluster_config); + SslContext::Ptr ssl_context(SslContextFactory::create()); + CassSsl* ssl = CassSsl::to(ssl_context.get()); + + EXPECT_EQ(CASS_OK, cass_cluster_set_contact_points(cluster, "some.contact.point")); + 
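+  // With contact points already set (and an SSL context set just below), configuring the secure
+  // connect bundle is expected to fail with CASS_ERROR_LIB_BAD_PARAMS. Intended public usage, as
+  // a sketch (the non-"_no_ssl_lib_init" variant is assumed to exist alongside the one used here):
+  //   CassCluster* cluster = cass_cluster_new();
+  //   cass_cluster_set_cloud_secure_connection_bundle(cluster, "/path/to/secure-connect.zip");
+  //   (do not also set contact points or an SSL context on the same cluster)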
cass_cluster_set_ssl(cluster, ssl); + add_logging_critera( + "Contact points and SSL context must not be specified with cloud secure connection bundle"); + EXPECT_EQ(CASS_ERROR_LIB_BAD_PARAMS, + cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init( + cluster, creds_zip_file().c_str())); + EXPECT_EQ(logging_criteria_count(), 1); +} +#endif diff --git a/gtests/src/unit/tests/test_cluster.cpp b/gtests/src/unit/tests/test_cluster.cpp index 8e9f91357..b00e31334 100644 --- a/gtests/src/unit/tests/test_cluster.cpp +++ b/gtests/src/unit/tests/test_cluster.cpp @@ -288,6 +288,39 @@ class ClusterUnitTest : public EventLoopTest { }; }; + class LocalDcClusterMetadataResolver : public ClusterMetadataResolver { + public: + LocalDcClusterMetadataResolver(const String& local_dc) + : desired_local_dc_(local_dc) {} + + private: + virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) { + resolved_contact_points_ = contact_points; + local_dc_ = desired_local_dc_; + callback_(this); + } + + virtual void internal_cancel() {} + + private: + String desired_local_dc_; + }; + + class LocalDcClusterMetadataResolverFactory : public ClusterMetadataResolverFactory { + public: + LocalDcClusterMetadataResolverFactory(const String& local_dc) + : local_dc_(local_dc) {} + + virtual ClusterMetadataResolver::Ptr new_instance(const ClusterSettings& settings) const { + return ClusterMetadataResolver::Ptr(new LocalDcClusterMetadataResolver(local_dc_)); + } + + virtual const char* name() const { return "LocalDc"; } + + private: + String local_dc_; + }; + static void on_connection_connected(ClusterConnector* connector, Future* future) { if (connector->is_ok()) { future->set(); @@ -329,10 +362,10 @@ TEST_F(ClusterUnitTest, Simple) { mockssandra::SimpleCluster cluster(simple(), 3); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); - contact_points.push_back("127.0.0.2"); - contact_points.push_back("127.0.0.3"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); + contact_points.push_back(Address("127.0.0.2", 9042)); + contact_points.push_back(Address("127.0.0.3", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -363,13 +396,13 @@ TEST_F(ClusterUnitTest, SimpleWithCriticalFailures) { .then(mockssandra::Action::Builder().plaintext_auth()) .auth_success(); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); // Good - contact_points.push_back("127.0.0.2"); // Invalid auth + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); // Good + contact_points.push_back(Address("127.0.0.2", 9042)); // Invalid auth add_logging_critera("Unable to connect to host 127.0.0.2 because of the " "following error: Received error response 'Invalid " "credentials'"); - contact_points.push_back("127.0.0.3"); // Invalid protocol + contact_points.push_back(Address("127.0.0.3", 9042)); // Invalid protocol add_logging_critera("Unable to connect to host 127.0.0.3 because of the " "following error: Received error response 'Invalid or " "unsupported protocol version'"); @@ -399,8 +432,8 @@ TEST_F(ClusterUnitTest, Resolve) { mockssandra::SimpleCluster cluster(simple(), 3); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("localhost"); + AddressVec contact_points; + contact_points.push_back(Address("localhost", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -416,8 
+449,8 @@ TEST_F(ClusterUnitTest, Auth) { mockssandra::SimpleCluster cluster(auth()); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -440,8 +473,8 @@ TEST_F(ClusterUnitTest, Ssl) { settings.control_connection_settings.connection_settings = use_ssl(&cluster); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -461,10 +494,10 @@ TEST_F(ClusterUnitTest, Cancel) { Vector connect_futures; Vector connectors; - ContactPointList contact_points; - contact_points.push_back("localhost"); - contact_points.push_back("google.com"); - contact_points.push_back("doesnotexist.dne"); + AddressVec contact_points; + contact_points.push_back(Address("localhost", 9042)); + contact_points.push_back(Address("google.com", 9042)); + contact_points.push_back(Address("doesnotexist.dne", 9042)); for (size_t i = 0; i < 10; ++i) { Future::Ptr connect_future(new Future()); @@ -507,8 +540,8 @@ TEST_F(ClusterUnitTest, ReconnectToDiscoveredHosts) { outage_plan.start_node(1); outage_plan.stop_node(3); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr close_future(new Future()); Future::Ptr connect_future(new Future()); @@ -550,8 +583,8 @@ TEST_F(ClusterUnitTest, ReconnectUpdateHosts) { outage_plan.stop_node(3); outage_plan.stop_node(1); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr close_future(new Future()); Future::Ptr connect_future(new Future()); @@ -590,8 +623,8 @@ TEST_F(ClusterUnitTest, CloseDuringReconnect) { mockssandra::SimpleCluster mock_cluster(simple()); mock_cluster.start_all(); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr close_future(new Future()); Future::Ptr connect_future(new Future()); @@ -623,8 +656,8 @@ TEST_F(ClusterUnitTest, NotifyDownUp) { mockssandra::SimpleCluster mock_cluster(simple(), 3); mock_cluster.start_all(); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr close_future(new Future()); Future::Ptr connect_future(new Future()); @@ -666,8 +699,8 @@ TEST_F(ClusterUnitTest, ProtocolNegotiation) { mockssandra::SimpleCluster cluster(builder.build()); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -688,8 +721,8 @@ TEST_F(ClusterUnitTest, NoSupportedProtocols) { mockssandra::SimpleCluster cluster(builder.build()); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(new 
Future()); ClusterConnector::Ptr connector( @@ -707,10 +740,10 @@ TEST_F(ClusterUnitTest, FindValidHost) { mockssandra::SimpleCluster cluster(simple(), 3); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.99.99.1"); // Invalid - contact_points.push_back("127.99.99.2"); // Invalid - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.99.99.1", 9042)); // Invalid + contact_points.push_back(Address("127.99.99.2", 9042)); // Invalid + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -730,10 +763,10 @@ TEST_F(ClusterUnitTest, NoHostsAvailable) { // Don't start the cluster // Try multiple hosts - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); - contact_points.push_back("127.0.0.2"); - contact_points.push_back("127.0.0.3"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); + contact_points.push_back(Address("127.0.0.2", 9042)); + contact_points.push_back(Address("127.0.0.3", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -751,8 +784,8 @@ TEST_F(ClusterUnitTest, InvalidAuth) { mockssandra::SimpleCluster cluster(auth()); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -775,8 +808,8 @@ TEST_F(ClusterUnitTest, InvalidSsl) { use_ssl(&cluster); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -803,8 +836,8 @@ TEST_F(ClusterUnitTest, DCAwareRecoverOnRemoteHost) { Address local_address("127.0.0.1", 9042); Address remote_address("127.0.0.2", 9042); - ContactPointList contact_points; - contact_points.push_back(local_address.to_string()); + AddressVec contact_points; + contact_points.push_back(local_address); Future::Ptr close_future(new Future()); Future::Ptr connect_future(new Future()); @@ -858,8 +891,8 @@ TEST_F(ClusterUnitTest, InvalidDC) { mockssandra::SimpleCluster cluster(simple()); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -886,8 +919,8 @@ TEST_F(ClusterUnitTest, DisableEventsOnStartup) { mockssandra::SimpleCluster cluster(simple(), 2); ASSERT_EQ(cluster.start_all(), 0); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(new Future()); ClusterConnector::Ptr connector( @@ -929,8 +962,8 @@ TEST_F(ClusterUnitTest, ReconnectionPolicy) { outage_plan.stop_node(1); outage_plan.start_node(1); - ContactPointList contact_points; - contact_points.push_back("127.0.0.1"); + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); Future::Ptr close_future(new Future()); Future::Ptr connect_future(new Future()); @@ -957,3 +990,80 @@ TEST_F(ClusterUnitTest, ReconnectionPolicy) { 
EXPECT_GE(policy->scheduled_delay_count(), 2u); EXPECT_EQ(3u, mock_cluster.connection_attempts(1)); // Includes initial connection attempt } + +TEST_F(ClusterUnitTest, LocalDcFromResolver) { + mockssandra::SimpleCluster cluster(simple(), 1); + ASSERT_EQ(cluster.start_all(), 0); + + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", 9042)); + + Future::Ptr connect_future(new Future()); + ClusterConnector::Ptr connector( + new ClusterConnector(contact_points, PROTOCOL_VERSION, + bind_callback(on_connection_reconnect, connect_future.get()))); + + ClusterSettings settings; + settings.cluster_metadata_resolver_factory = ClusterMetadataResolverFactory::Ptr( + new LocalDcClusterMetadataResolverFactory("this_local_dc")); + connector->with_settings(settings)->connect(event_loop()); + + ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME)); + EXPECT_FALSE(connect_future->error()); + ASSERT_EQ("this_local_dc", connect_future->cluster()->local_dc()); +} + +TEST_F(ClusterUnitTest, NoContactPoints) { + // No cluster needed + + AddressVec contact_points; // Empty + + Future::Ptr connect_future(new Future()); + ClusterConnector::Ptr connector( + new ClusterConnector(contact_points, PROTOCOL_VERSION, + bind_callback(on_connection_connected, connect_future.get()))); + connector->connect(event_loop()); + + ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME)) + << "Timed out waiting for cluster to connect"; + ASSERT_TRUE(connect_future->error()); + EXPECT_EQ(connect_future->error()->code, CASS_ERROR_LIB_NO_HOSTS_AVAILABLE); +} + +TEST_F(ClusterUnitTest, PortIsAssignedDuringConnection) { + mockssandra::SimpleCluster cluster(simple(), 1); + ASSERT_EQ(cluster.start_all(), 0); + + AddressVec contact_points; + contact_points.push_back(Address("127.0.0.1", -1)); + + Future::Ptr connect_future(new Future()); + ClusterConnector::Ptr connector( + new ClusterConnector(contact_points, PROTOCOL_VERSION, + bind_callback(on_connection_reconnect, connect_future.get()))); + + ClusterSettings settings; // Default port and metadata resolver + connector->with_settings(settings)->connect(event_loop()); + + ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME)); + EXPECT_FALSE(connect_future->error()); +} + +TEST_F(ClusterUnitTest, HostIsResolvedAndPortIsAssignedDuringConnection) { + mockssandra::SimpleCluster cluster(simple(), 1); + ASSERT_EQ(cluster.start_all(), 0); + + AddressVec contact_points; + contact_points.push_back(Address("localhost", -1)); + + Future::Ptr connect_future(new Future()); + ClusterConnector::Ptr connector( + new ClusterConnector(contact_points, PROTOCOL_VERSION, + bind_callback(on_connection_reconnect, connect_future.get()))); + + ClusterSettings settings; // Default port and metadata resolver + connector->with_settings(settings)->connect(event_loop()); + + ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME)); + EXPECT_FALSE(connect_future->error()); +} diff --git a/gtests/src/unit/tests/test_connection.cpp b/gtests/src/unit/tests/test_connection.cpp index 6f3d8b823..1f392faa1 100644 --- a/gtests/src/unit/tests/test_connection.cpp +++ b/gtests/src/unit/tests/test_connection.cpp @@ -303,7 +303,7 @@ TEST_F(ConnectionUnitTest, SslCancel) { } TEST_F(ConnectionUnitTest, Timeout) { - mockssandra::RequestHandler::Builder builder; + mockssandra::SimpleRequestHandlerBuilder builder; builder.on(mockssandra::OPCODE_STARTUP).no_result(); // Don't return a response mockssandra::SimpleCluster cluster(builder.build()); ASSERT_EQ(cluster.start_all(), 0); diff --git 
a/gtests/src/unit/tests/test_decoder.cpp b/gtests/src/unit/tests/test_decoder.cpp index 33ae4f98f..c9a75f64b 100644 --- a/gtests/src/unit/tests/test_decoder.cpp +++ b/gtests/src/unit/tests/test_decoder.cpp @@ -740,7 +740,7 @@ TEST_F(DecoderUnitTest, DecodeStringMultiMap) { 0, 6, 80, 121, 116, 104, 111, 110, // Python 0, 4, 82, 117, 98, 121 }; // Ruby TestDecoder decoder(input, 58); - Map > value; + StringMultimap value; // SUCCESS ASSERT_TRUE(decoder.decode_string_multimap(value)); diff --git a/gtests/src/unit/tests/test_exec_profile.cpp b/gtests/src/unit/tests/test_exec_profile.cpp index d4b9fdde9..85365ca8d 100644 --- a/gtests/src/unit/tests/test_exec_profile.cpp +++ b/gtests/src/unit/tests/test_exec_profile.cpp @@ -41,8 +41,8 @@ TEST(ExecutionProfileUnitTest, Consistency) { Config copy_config = config.new_instance(); ExecutionProfile profile_lookup; ASSERT_TRUE(execution_profile(copy_config, "profile", profile_lookup)); - ASSERT_EQ(CASS_DEFAULT_CONSISTENCY, profile_lookup.consistency()); - ASSERT_EQ(CASS_DEFAULT_CONSISTENCY, copy_config.default_profile().consistency()); + ASSERT_EQ(CASS_CONSISTENCY_UNKNOWN, profile_lookup.consistency()); + ASSERT_EQ(CASS_CONSISTENCY_UNKNOWN, copy_config.default_profile().consistency()); } TEST(ExecutionProfileUnitTest, SerialConsistency) { diff --git a/gtests/src/unit/tests/test_http_client.cpp b/gtests/src/unit/tests/test_http_client.cpp new file mode 100644 index 000000000..aa6ef14f0 --- /dev/null +++ b/gtests/src/unit/tests/test_http_client.cpp @@ -0,0 +1,295 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include "http_test.hpp" + +#include "driver_info.hpp" +#include "http_client.hpp" + +using namespace datastax::internal; +using datastax::internal::core::HttpClient; +using datastax::internal::core::SocketSettings; +using datastax::internal::core::SslContext; +using datastax::internal::core::SslContextFactory; +using mockssandra::Ssl; + +class HttpClientUnitTest : public HttpTest { +public: + static void on_success_response(HttpClient* client, bool* flag) { + *flag = true; + EXPECT_TRUE(client->is_ok()) << "Failed to connect: " << client->error_message(); + EXPECT_EQ("text/plain", client->content_type()); + EXPECT_EQ(echo_response(), client->response_body()); + } + + static void on_failed_response(HttpClient* client, bool* flag) { + *flag = true; + EXPECT_FALSE(client->is_ok()); + } + + static void on_canceled(HttpClient* client, bool* flag) { + if (client->is_canceled()) { + *flag = true; + } + } + +private: + static String echo_response() { + OStringStream ss; + + ss << "GET / HTTP/1.0\r\n" + << "Host: " HTTP_MOCK_SERVER_IP << ":" << HTTP_MOCK_SERVER_PORT << "\r\n" + << "User-Agent: cpp-driver/" << driver_version() << "\r\nAccept: */*\r\n\r\n"; + + return ss.str(); + } +}; + +TEST_F(HttpClientUnitTest, Simple) { + start_http_server(); + + bool is_success = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/", + bind_callback(on_success_response, &is_success))); + client->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_success); + + stop_http_server(); +} + +TEST_F(HttpClientUnitTest, Cancel) { + start_http_server(); + + Vector clients; + + bool is_canceled = false; + for (size_t i = 0; i < 10; ++i) { + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/", + bind_callback(on_canceled, &is_canceled))); + client->request(loop()); + clients.push_back(client); + } + + Vector::iterator it = clients.begin(); + while (it != clients.end()) { + (*it)->cancel(); + uv_run(loop(), UV_RUN_NOWAIT); + it++; + } + + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_canceled); +} + +TEST_F(HttpClientUnitTest, CancelTimeout) { + set_close_connnection_after_request(false); + start_http_server(); + + Vector clients; + + bool is_canceled = false; + for (size_t i = 0; i < 10; ++i) { + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), + "/invalid", bind_callback(on_canceled, &is_canceled))); + client + ->with_request_timeout_ms(200) // Timeout quickly + ->request(loop()); + clients.push_back(client); + } + + Vector::iterator it = clients.begin(); + while (it != clients.end()) { + (*it)->cancel(); + uv_run(loop(), UV_RUN_NOWAIT); + it++; + } + + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_canceled); + + for (Vector::const_iterator it = clients.begin(), end = clients.end(); it != end; + ++it) { + const HttpClient::Ptr& client(*it); + if (!client->is_canceled()) { + EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_TIMEOUT); + EXPECT_EQ(client->status_code(), 404); + } + } +} + +TEST_F(HttpClientUnitTest, InvalidHttpServer) { + bool is_failed = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/", + bind_callback(on_failed_response, &is_failed))); + client->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_failed); + EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_SOCKET); +} + +TEST_F(HttpClientUnitTest, InvalidHttpServerResponse) { + 
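+  // enable_valid_response(false) presumably makes the mock HTTP server (from http_test.hpp,
+  // added in this patch) reply with a malformed payload, so the client should fail with
+  // HTTP_CLIENT_ERROR_PARSING rather than a socket or HTTP status error.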
enable_valid_response(false); + start_http_server(); + + bool is_failed = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/", + bind_callback(on_failed_response, &is_failed))); + client->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_failed); + EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_PARSING); + + stop_http_server(); +} + +TEST_F(HttpClientUnitTest, InvalidPath) { + start_http_server(); + + bool is_failed = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), + "/invalid", bind_callback(on_failed_response, &is_failed))); + client->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_failed); + EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_HTTP_STATUS); + EXPECT_EQ(client->status_code(), 404); + + stop_http_server(); +} + +TEST_F(HttpClientUnitTest, Timeout) { + set_close_connnection_after_request(false); + start_http_server(); + + bool is_failed = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), + "/invalid", bind_callback(on_failed_response, &is_failed))); + client + ->with_request_timeout_ms(200) // Timeout quickly + ->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_failed); + EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_TIMEOUT); + EXPECT_EQ(client->status_code(), 404); + + stop_http_server(); +} + +#ifdef HAVE_OPENSSL +TEST_F(HttpClientUnitTest, Ssl) { + SocketSettings settings = use_ssl(); + start_http_server(); + + bool is_success = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/", + bind_callback(on_success_response, &is_success))); + client->with_settings(settings)->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_success); + + stop_http_server(); +} + +TEST_F(HttpClientUnitTest, NoClientCertProvidedSsl) { + String ca_key = mockssandra::Ssl::generate_key(); + String ca_cert = mockssandra::Ssl::generate_cert(ca_key, "CA"); + + use_ssl(ca_key, ca_cert, HTTP_MOCK_HOSTNAME); + start_http_server(); + + bool is_failed = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/", + bind_callback(on_failed_response, &is_failed))); + + SslContext::Ptr ssl_context(SslContextFactory::create()); + + // No client certificate provided + + ssl_context->add_trusted_cert(ca_cert.c_str(), ca_cert.size()); + + SocketSettings settings; + settings.ssl_context = ssl_context; + client->with_settings(settings)->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_failed); + EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_SOCKET); + + stop_http_server(); +} + +TEST_F(HttpClientUnitTest, InvalidClientCertSsl) { + String ca_key = mockssandra::Ssl::generate_key(); + String ca_cert = mockssandra::Ssl::generate_cert(ca_key, "CA"); + + String client_key = mockssandra::Ssl::generate_key(); + String client_cert = mockssandra::Ssl::generate_cert(client_key, ""); // Self-signed + + use_ssl(ca_key, ca_cert, HTTP_MOCK_HOSTNAME); + start_http_server(); + + bool is_failed = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/", + bind_callback(on_failed_response, &is_failed))); + + SslContext::Ptr ssl_context(SslContextFactory::create()); + + ssl_context->set_cert(client_cert.c_str(), client_cert.size()); + ssl_context->set_private_key(client_key.c_str(), 
client_key.size(), "", + 0); // No password expected for the private key + ssl_context->add_trusted_cert(ca_cert.c_str(), ca_cert.size()); + + SocketSettings settings; + settings.ssl_context = ssl_context; + + client->with_settings(settings)->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_failed); + EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_SOCKET); + + stop_http_server(); +} + +TEST_F(HttpClientUnitTest, InvalidClientSslNotConfigured) { + use_ssl(); + start_http_server(); + + bool is_failed = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/", + bind_callback(on_failed_response, &is_failed))); + client->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_failed); + EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_CLOSED); + + stop_http_server(); +} + +TEST_F(HttpClientUnitTest, InvalidServerSslNotConfigured) { + SocketSettings settings = use_ssl("127.0.0.1", false); + start_http_server(); + + bool is_failed = false; + HttpClient::Ptr client(new HttpClient(Address(HTTP_MOCK_SERVER_IP, HTTP_MOCK_SERVER_PORT), "/", + bind_callback(on_failed_response, &is_failed))); + client->with_settings(settings)->request(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + EXPECT_TRUE(is_failed); + EXPECT_EQ(client->error_code(), HttpClient::HTTP_CLIENT_ERROR_SOCKET); + + stop_http_server(); +} +#endif diff --git a/gtests/src/unit/tests/test_load_balancing.cpp b/gtests/src/unit/tests/test_load_balancing.cpp index 31fdf2f75..48a597e9a 100644 --- a/gtests/src/unit/tests/test_load_balancing.cpp +++ b/gtests/src/unit/tests/test_load_balancing.cpp @@ -169,7 +169,7 @@ void test_dc_aware_policy(size_t local_count, size_t remote_count) { populate_hosts(local_count, "rack", LOCAL_DC, &hosts); populate_hosts(remote_count, "rack", REMOTE_DC, &hosts); DCAwarePolicy policy(LOCAL_DC, remote_count, false); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); const size_t total_hosts = local_count + remote_count; @@ -185,7 +185,7 @@ TEST(RoundRobinLoadBalancingUnitTest, Simple) { populate_hosts(2, "rack", "dc", &hosts); RoundRobinPolicy policy; - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); // start on first elem ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); @@ -207,7 +207,7 @@ TEST(RoundRobinLoadBalancingUnitTest, OnAdd) { populate_hosts(2, "rack", "dc", &hosts); RoundRobinPolicy policy; - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); // baseline ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); @@ -230,7 +230,7 @@ TEST(RoundRobinLoadBalancingUnitTest, OnRemove) { populate_hosts(3, "rack", "dc", &hosts); RoundRobinPolicy policy; - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); SharedRefPtr host = hosts.begin()->second; @@ -251,7 +251,7 @@ TEST(RoundRobinLoadBalancingUnitTest, OnUpAndDown) { populate_hosts(3, "rack", "dc", &hosts); RoundRobinPolicy policy; - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp_before1(policy.new_query_plan("ks", NULL, NULL)); ScopedPtr qp_before2(policy.new_query_plan("ks", NULL, NULL)); @@ -297,7 +297,7 @@ TEST(RoundRobinLoadBalancingUnitTest, VerifyEqualDistribution) { populate_hosts(3, "rack", "dc", &hosts); RoundRobinPolicy policy; - policy.init(SharedRefPtr(), hosts, NULL); + 
policy.init(SharedRefPtr(), hosts, NULL, ""); { // All nodes QueryCounts counts(run_policy(policy, 12)); @@ -338,7 +338,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, SomeDatacenterLocalUnspecified) { h->set_rack_and_dc("", ""); DCAwarePolicy policy(LOCAL_DC, 1, false); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); @@ -353,7 +353,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, SingleLocalDown) { populate_hosts(1, "rack", REMOTE_DC, &hosts); DCAwarePolicy policy(LOCAL_DC, 1, false); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp_before( policy.new_query_plan("ks", NULL, NULL)); // has down host ptr in plan @@ -380,7 +380,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, AllLocalRemovedReturned) { populate_hosts(1, "rack", REMOTE_DC, &hosts); DCAwarePolicy policy(LOCAL_DC, 1, false); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp_before( policy.new_query_plan("ks", NULL, NULL)); // has down host ptr in plan @@ -412,7 +412,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, RemoteRemovedReturned) { SharedRefPtr target_host = hosts[target_addr]; DCAwarePolicy policy(LOCAL_DC, 1, false); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp_before( policy.new_query_plan("ks", NULL, NULL)); // has down host ptr in plan @@ -443,7 +443,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, UsedHostsPerDatacenter) { for (size_t used_hosts = 0; used_hosts < 4; ++used_hosts) { DCAwarePolicy policy(LOCAL_DC, used_hosts, false); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); Vector seq; @@ -476,7 +476,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, AllowRemoteDatacentersForLocalConsist // Not allowing remote DCs for local CLs bool allow_remote_dcs_for_local_cl = false; DCAwarePolicy policy(LOCAL_DC, 3, !allow_remote_dcs_for_local_cl); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); // Set local CL QueryRequest::Ptr request(new QueryRequest("", 0)); @@ -494,7 +494,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, AllowRemoteDatacentersForLocalConsist // Allowing remote DCs for local CLs bool allow_remote_dcs_for_local_cl = true; DCAwarePolicy policy(LOCAL_DC, 3, !allow_remote_dcs_for_local_cl); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); // Set local CL QueryRequest::Ptr request(new QueryRequest("", 0)); @@ -517,7 +517,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, StartWithEmptyLocalDatacenter) { // Set local DC using connected host { DCAwarePolicy policy("", 0, false); - policy.init(hosts[Address("2.0.0.0", 9042)], hosts, NULL); + policy.init(hosts[Address("2.0.0.0", 9042)], hosts, NULL, ""); ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); const size_t seq[] = { 2, 3, 4 }; @@ -527,7 +527,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, StartWithEmptyLocalDatacenter) { // Set local DC using first host with non-empty DC { DCAwarePolicy policy("", 0, false); - policy.init(SharedRefPtr(new Host(Address("0.0.0.0", 9042))), hosts, NULL); + policy.init(SharedRefPtr(new Host(Address("0.0.0.0", 9042))), hosts, NULL, ""); ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); const size_t seq[] = { 1 }; @@ -547,7 +547,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, 
VerifyEqualDistributionLocalDc) { populate_hosts(3, "rack", REMOTE_DC, &hosts); DCAwarePolicy policy("", 0, false); - policy.init(hosts.begin()->second, hosts, NULL); + policy.init(hosts.begin()->second, hosts, NULL, ""); { // All local nodes QueryCounts counts(run_policy(policy, 12)); @@ -590,7 +590,7 @@ TEST(DatacenterAwareLoadBalancingUnitTest, VerifyEqualDistributionRemoteDc) { populate_hosts(3, "rack", REMOTE_DC, &hosts); DCAwarePolicy policy("", 3, false); // Allow all remote DC nodes - policy.init(hosts.begin()->second, hosts, NULL); + policy.init(hosts.begin()->second, hosts, NULL, ""); Host::Ptr remote_dc_node1; { // Mark down all local nodes @@ -664,7 +664,7 @@ TEST(TokenAwareLoadBalancingUnitTest, Simple) { token_map->build(); TokenAwarePolicy policy(new RoundRobinPolicy(), false); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); QueryRequest::Ptr request(new QueryRequest("", 1)); const char* value = "kjdfjkldsdjkl"; // hash: 9024137376112061887 @@ -737,7 +737,7 @@ TEST(TokenAwareLoadBalancingUnitTest, NetworkTopology) { token_map->build(); TokenAwarePolicy policy(new DCAwarePolicy(LOCAL_DC, num_hosts / 2, false), false); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); QueryRequest::Ptr request(new QueryRequest("", 1)); const char* value = "abc"; // hash: -5434086359492102041 @@ -811,7 +811,7 @@ TEST(TokenAwareLoadBalancingUnitTest, ShuffleReplicas) { HostVec not_shuffled; { TokenAwarePolicy policy(new RoundRobinPolicy(), false); // Not shuffled - policy.init(SharedRefPtr(), hosts, &random); + policy.init(SharedRefPtr(), hosts, &random, ""); ScopedPtr qp1(policy.new_query_plan("test", request_handler.get(), token_map.get())); for (int i = 0; i < num_hosts; ++i) { not_shuffled.push_back(qp1->compute_next()); @@ -829,7 +829,7 @@ TEST(TokenAwareLoadBalancingUnitTest, ShuffleReplicas) { // Verify that the shuffle setting does indeed shuffle the replicas { TokenAwarePolicy shuffle_policy(new RoundRobinPolicy(), true); // Shuffled - shuffle_policy.init(SharedRefPtr(), hosts, &random); + shuffle_policy.init(SharedRefPtr(), hosts, &random, ""); HostVec shuffled_previous; ScopedPtr qp( @@ -927,7 +927,7 @@ TEST(LatencyAwareLoadBalancingUnitTest, Simple) { HostMap hosts; populate_hosts(num_hosts, "rack1", LOCAL_DC, &hosts); LatencyAwarePolicy policy(new RoundRobinPolicy(), settings); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); // Record some latencies with 100 ns being the minimum for (HostMap::iterator i = hosts.begin(); i != hosts.end(); ++i) { @@ -989,7 +989,7 @@ TEST(LatencyAwareLoadBalancingUnitTest, MinAverageUnderMinMeasured) { HostMap hosts; populate_hosts(num_hosts, "rack1", LOCAL_DC, &hosts); LatencyAwarePolicy policy(new RoundRobinPolicy(), settings); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); int count = 1; for (HostMap::iterator i = hosts.begin(); i != hosts.end(); ++i) { @@ -1023,7 +1023,7 @@ TEST(WhitelistLoadBalancingUnitTest, Hosts) { whitelist_hosts.push_back("37.0.0.0"); whitelist_hosts.push_back("83.0.0.0"); WhitelistPolicy policy(new RoundRobinPolicy(), whitelist_hosts); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); @@ -1044,7 +1044,7 @@ TEST(WhitelistLoadBalancingUnitTest, Datacenters) { whitelist_dcs.push_back(LOCAL_DC); whitelist_dcs.push_back(REMOTE_DC); WhitelistDCPolicy policy(new 
RoundRobinPolicy(), whitelist_dcs); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); @@ -1064,7 +1064,7 @@ TEST(BlacklistLoadBalancingUnitTest, Hosts) { blacklist_hosts.push_back("2.0.0.0"); blacklist_hosts.push_back("3.0.0.0"); BlacklistPolicy policy(new RoundRobinPolicy(), blacklist_hosts); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); @@ -1085,7 +1085,7 @@ TEST(BlacklistLoadBalancingUnitTest, Datacenters) { blacklist_dcs.push_back(LOCAL_DC); blacklist_dcs.push_back(REMOTE_DC); BlacklistDCPolicy policy(new RoundRobinPolicy(), blacklist_dcs); - policy.init(SharedRefPtr(), hosts, NULL); + policy.init(SharedRefPtr(), hosts, NULL, ""); ScopedPtr qp(policy.new_query_plan("ks", NULL, NULL)); diff --git a/gtests/src/unit/tests/test_pool.cpp b/gtests/src/unit/tests/test_pool.cpp index 79bd11504..6208ce962 100644 --- a/gtests/src/unit/tests/test_pool.cpp +++ b/gtests/src/unit/tests/test_pool.cpp @@ -70,9 +70,9 @@ class PoolUnitTest : public LoopTest { : public RequestState , public Status { public: - RequestStatus(uv_loop_t* loop, int num_nodes = NUM_NODES) + RequestStatus(uv_loop_t* loop, int num_requests = NUM_NODES) : loop_(loop) - , remaining_(num_nodes) {} + , remaining_(num_requests) {} virtual void set(RequestState::Enum state) { Status::set(state); @@ -88,13 +88,13 @@ class PoolUnitTest : public LoopTest { protected: uv_loop_t* loop_; - size_t remaining_; + int remaining_; }; class RequestStatusWithManager : public RequestStatus { public: - RequestStatusWithManager(uv_loop_t* loop, int num_nodes = NUM_NODES) - : RequestStatus(loop, num_nodes) {} + RequestStatusWithManager(uv_loop_t* loop, int num_requests = NUM_NODES) + : RequestStatus(loop, num_requests) {} ~RequestStatusWithManager() { ConnectionPoolManager::Ptr temp(manager()); @@ -324,7 +324,7 @@ class PoolUnitTest : public LoopTest { if (connection) { RequestStatus status(manager->loop(), 1); RequestCallback::Ptr callback(new RequestCallback(&status)); - EXPECT_TRUE(connection->write(callback.get())) + EXPECT_TRUE(connection->write(callback.get()) > 0) << "Unable to write request to connection " << address.to_string(); connection->flush(); // Flush requests to avoid unnecessary timeouts uv_run(loop(), UV_RUN_DEFAULT); @@ -344,7 +344,7 @@ class PoolUnitTest : public LoopTest { PooledConnection::Ptr connection = manager->find_least_busy(generator.next()); if (connection) { RequestCallback::Ptr callback(new RequestCallback(status)); - if (!connection->write(callback.get())) { + if (connection->write(callback.get()) < 0) { status->error_failed_write(); } } else { @@ -354,6 +354,33 @@ class PoolUnitTest : public LoopTest { } } + static void on_pool_connected_exhaust_streams(ConnectionPoolManagerInitializer* initializer, + RequestStatusWithManager* status) { + const Address address("127.0.0.1", 9042); + ConnectionPoolManager::Ptr manager = initializer->release_manager(); + status->set_manager(manager); + + for (size_t i = 0; i < CASS_MAX_STREAMS; ++i) { + PooledConnection::Ptr connection = manager->find_least_busy(address); + + if (connection) { + RequestCallback::Ptr callback(new RequestCallback(status)); + if (connection->write(callback.get()) < 0) { + status->error_failed_write(); + } + } else { + status->error_no_connection(); + } + } + + PooledConnection::Ptr connection = manager->find_least_busy(address); + ASSERT_TRUE(connection); + 
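// Every available stream id on this connection has now been written to, so the
// next write below is expected to fail with REQUEST_ERROR_NO_AVAILABLE_STREAM_IDS.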
    RequestCallback::Ptr callback(new RequestCallback(status));
+    EXPECT_EQ(connection->write(callback.get()), Request::REQUEST_ERROR_NO_AVAILABLE_STREAM_IDS);
+
+    manager->flush();
+  }
+
   static void on_pool_nop(ConnectionPoolManagerInitializer* initializer,
                           RequestStatusWithManager* status) {
     ConnectionPoolManager::Ptr manager = initializer->release_manager();
@@ -798,6 +825,17 @@ TEST_F(PoolUnitTest, PartialReconnect) {
   // TODO:
 }
 
-TEST_F(PoolUnitTest, LowNumberOfStreams) {
-  // TODO:
+TEST_F(PoolUnitTest, NoAvailableStreams) {
+  mockssandra::SimpleCluster cluster(simple(), 1);
+  ASSERT_EQ(cluster.start_all(), 0);
+
+  RequestStatusWithManager status(loop(), CASS_MAX_STREAMS);
+
+  ConnectionPoolManagerInitializer::Ptr initializer(new ConnectionPoolManagerInitializer(
+      PROTOCOL_VERSION, bind_callback(on_pool_connected_exhaust_streams, &status)));
+
+  initializer->initialize(loop(), hosts());
+  uv_run(loop(), UV_RUN_DEFAULT);
+
+  EXPECT_EQ(status.count(RequestStatus::SUCCESS), CASS_MAX_STREAMS) << status.results();
 }
diff --git a/gtests/src/unit/tests/test_request_processor.cpp b/gtests/src/unit/tests/test_request_processor.cpp
index 80c83ca9b..d1900397d 100644
--- a/gtests/src/unit/tests/test_request_processor.cpp
+++ b/gtests/src/unit/tests/test_request_processor.cpp
@@ -27,19 +27,91 @@
 using namespace datastax::internal;
 using namespace datastax::internal::core;
 
+class InorderLoadBalancingPolicy : public LoadBalancingPolicy {
+public:
+  typedef SharedRefPtr<InorderLoadBalancingPolicy> Ptr;
+  typedef Vector<Ptr> Vec;
+
+  InorderLoadBalancingPolicy()
+      : LoadBalancingPolicy()
+      , hosts_(new HostVec()) {}
+
+  virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random,
+                    const String& local_dc) {
+    hosts_->reserve(hosts.size());
+    std::transform(hosts.begin(), hosts.end(), std::back_inserter(*hosts_), GetHost());
+  }
+
+  virtual CassHostDistance distance(const Host::Ptr& host) const {
+    return CASS_HOST_DISTANCE_LOCAL;
+  }
+
+  virtual bool is_host_up(const Address& address) const {
+    return std::find_if(hosts_->begin(), hosts_->end(), FindAddress(address)) != hosts_->end();
+  }
+
+  virtual void on_host_added(const Host::Ptr& host) { add_host(hosts_, host); }
+
+  virtual void on_host_removed(const Host::Ptr& host) { remove_host(hosts_, host); }
+
+  virtual void on_host_up(const Host::Ptr& host) { add_host(hosts_, host); }
+
+  virtual void on_host_down(const Address& address) { remove_host(hosts_, address); }
+
+  virtual QueryPlan* new_query_plan(const String& keyspace, RequestHandler* request_handler,
+                                    const TokenMap* token_map) {
+    return new InternalQueryPlan(hosts_);
+  }
+
+  virtual LoadBalancingPolicy* new_instance() { return new InorderLoadBalancingPolicy(); }
+
+private:
+  struct FindAddress {
+
+    FindAddress(const Address& address)
+        : address(address) {}
+
+    bool operator()(const Host::Ptr& host) const { return host->address() == address; }
+
+    Address address;
+  };
+
+  class InternalQueryPlan : public datastax::internal::core::QueryPlan {
+  public:
+    InternalQueryPlan(const CopyOnWriteHostVec& hosts)
+        : index_(0)
+        , hosts_(hosts) {}
+
+    virtual Host::Ptr compute_next() {
+      if (index_ < hosts_->size()) {
+        return (*hosts_)[index_++];
+      }
+      return Host::Ptr();
+    }
+
+  private:
+    size_t index_;
+    CopyOnWriteHostVec hosts_;
+  };
+
+private:
+  CopyOnWriteHostVec hosts_;
+};
+
 class RequestProcessorUnitTest : public EventLoopTest {
 public:
   RequestProcessorUnitTest()
       : EventLoopTest("RequestProcessorUnitTest") {}
 
-  HostMap generate_hosts() {
+  HostMap generate_hosts(size_t num_hosts = 3) {
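// Builds `num_hosts` hosts at 127.0.0.1 through 127.0.0.<num_hosts> (capped at 255),
// replacing the previous fixed three-host list.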
HostMap hosts; - Host::Ptr host1(new Host(Address("127.0.0.1", PORT))); - Host::Ptr host2(new Host(Address("127.0.0.2", PORT))); - Host::Ptr host3(new Host(Address("127.0.0.3", PORT))); - hosts[host1->address()] = host1; - hosts[host2->address()] = host2; - hosts[host3->address()] = host3; + num_hosts = std::min(num_hosts, static_cast(255)); + for (size_t i = 1; i <= num_hosts; ++i) { + char buf[64]; + sprintf(buf, "127.0.0.%d", static_cast(i)); + Host::Ptr host(new Host(Address(buf, PORT))); + hosts[host->address()] = host; + } return hosts; } @@ -205,7 +277,7 @@ TEST_F(RequestProcessorUnitTest, Simple) { Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); initializer->initialize(event_loop()); @@ -224,7 +296,7 @@ TEST_F(RequestProcessorUnitTest, CloseWithRequestsPending) { Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); initializer->initialize(event_loop()); @@ -262,7 +334,7 @@ TEST_F(RequestProcessorUnitTest, Auth) { Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); RequestProcessorSettings settings; @@ -287,7 +359,7 @@ TEST_F(RequestProcessorUnitTest, Ssl) { Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); initializer->with_settings(settings)->initialize(event_loop()); @@ -311,7 +383,7 @@ TEST_F(RequestProcessorUnitTest, NotifyAddRemoveHost) { Future::Ptr up_future(new Future()); Future::Ptr down_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); RequestProcessorSettings settings; @@ -343,7 +415,7 @@ TEST_F(RequestProcessorUnitTest, CloseDuringReconnect) { Future::Ptr close_future(new Future()); Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); RequestProcessorSettings settings; @@ -378,7 +450,7 @@ TEST_F(RequestProcessorUnitTest, CloseDuringAddNewHost) { Future::Ptr close_future(new Future()); Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", 
bind_callback(on_connected, connect_future.get()))); CloseListener::Ptr listener(new CloseListener(close_future)); @@ -408,7 +480,7 @@ TEST_F(RequestProcessorUnitTest, PoolDown) { Future::Ptr up_future(new Future()); Future::Ptr down_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); UpDownListener::Ptr listener(new UpDownListener(up_future, down_future, target_host)); @@ -438,7 +510,7 @@ TEST_F(RequestProcessorUnitTest, PoolUp) { Future::Ptr up_future(new Future()); Future::Ptr down_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); RequestProcessorSettings settings; @@ -466,7 +538,7 @@ TEST_F(RequestProcessorUnitTest, InvalidAuth) { Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); RequestProcessorSettings settings; @@ -492,7 +564,7 @@ TEST_F(RequestProcessorUnitTest, InvalidSsl) { Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); SslContext::Ptr ssl_context(SslContextFactory::create()); // No trusted cert @@ -528,7 +600,7 @@ TEST_F(RequestProcessorUnitTest, RollingRestart) { HostMap hosts(generate_hosts()); Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); RequestProcessorSettings settings; @@ -560,7 +632,7 @@ TEST_F(RequestProcessorUnitTest, NoHostsAvailable) { HostMap hosts(generate_hosts()); Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); initializer->with_listener(listener.get())->initialize(event_loop()); @@ -596,7 +668,7 @@ TEST_F(RequestProcessorUnitTest, RequestTimeout) { HostMap hosts(generate_hosts()); Future::Ptr connect_future(new Future()); RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( - hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), + hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "", bind_callback(on_connected, connect_future.get()))); initializer->with_listener(listener.get())->initialize(event_loop()); @@ -619,3 +691,73 @@ TEST_F(RequestProcessorUnitTest, RequestTimeout) { processor->close(); ASSERT_TRUE(close_future->wait_for(WAIT_FOR_TIME)); } + +TEST_F(RequestProcessorUnitTest, LowNumberOfStreams) { + 
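// Two delayed-response nodes are queried through an in-order load balancing policy;
// once both hosts' stream ids are exhausted, the next request should fail over to
// 127.0.0.2 and the one after that should error with CASS_ERROR_LIB_NO_HOSTS_AVAILABLE.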
  mockssandra::SimpleRequestHandlerBuilder builder;
+  builder.on(mockssandra::OPCODE_QUERY)
+      .wait(1000) // Give time for the streams to run out
+      .system_local()
+      .system_peers()
+      .empty_rows_result(1);
+  mockssandra::SimpleCluster cluster(builder.build(), 2); // Two node cluster
+  ASSERT_EQ(cluster.start_all(), 0);
+
+  Future::Ptr close_future(new Future());
+  CloseListener::Ptr listener(new CloseListener(close_future));
+
+  HostMap hosts(generate_hosts(2));
+  Future::Ptr connect_future(new Future());
+
+  ExecutionProfile profile;
+  profile.set_load_balancing_policy(new InorderLoadBalancingPolicy());
+  profile.set_speculative_execution_policy(new NoSpeculativeExecutionPolicy());
+  profile.set_retry_policy(new DefaultRetryPolicy());
+
+  RequestProcessorSettings settings;
+  settings.default_profile = profile;
+  settings.request_queue_size = 2 * CASS_MAX_STREAMS + 1; // Create a request queue with enough room
+
+  RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer(
+      hosts.begin()->second, PROTOCOL_VERSION, hosts, TokenMap::Ptr(), "",
+      bind_callback(on_connected, connect_future.get())));
+  initializer->with_settings(settings)->with_listener(listener.get())->initialize(event_loop());
+
+  ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME));
+  EXPECT_FALSE(connect_future->error());
+  RequestProcessor::Ptr processor(connect_future->processor());
+
+  // Saturate the hosts connections, but leave one stream.
+  for (int i = 0; i < 2 * CASS_MAX_STREAMS - 1; ++i) {
+    ResponseFuture::Ptr response_future(new ResponseFuture());
+    Statement::Ptr request(new QueryRequest("SELECT * FROM table"));
+    RequestHandler::Ptr request_handler(new RequestHandler(request, response_future));
+    processor->process_request(request_handler);
+  }
+
+  { // Try two more requests. One should succeed on "127.0.0.2" and the other should fail (out of
+    // streams).
+    ResponseFuture::Ptr response_future(new ResponseFuture());
+
+    Statement::Ptr request(new QueryRequest("SELECT * FROM table"));
+    request->set_record_attempted_addresses(true);
+    RequestHandler::Ptr request_handler(new RequestHandler(request, response_future));
+    processor->process_request(request_handler);
+
+    ResponseFuture::Ptr response_future_fail(new ResponseFuture());
+    RequestHandler::Ptr request_handler_fail(new RequestHandler(
+        Statement::Ptr(new QueryRequest("SELECT * FROM table")), response_future_fail));
+    processor->process_request(request_handler_fail);
+    ASSERT_TRUE(response_future_fail->wait_for(WAIT_FOR_TIME));
+    ASSERT_TRUE(response_future_fail->error());
+    EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, response_future_fail->error()->code);
+
+    ASSERT_TRUE(response_future->wait_for(WAIT_FOR_TIME));
+    EXPECT_FALSE(response_future->error());
+    AddressVec attempted = response_future->attempted_addresses();
+    ASSERT_GE(attempted.size(), 1u);
+    EXPECT_EQ(attempted[0], Address("127.0.0.2", PORT));
+  }
+
+  processor->close();
+  ASSERT_TRUE(close_future->wait_for(WAIT_FOR_TIME));
+}
diff --git a/gtests/src/unit/tests/test_session.cpp b/gtests/src/unit/tests/test_session.cpp
index 6693183cb..091b8c36e 100644
--- a/gtests/src/unit/tests/test_session.cpp
+++ b/gtests/src/unit/tests/test_session.cpp
@@ -47,10 +47,14 @@ class SessionUnitTest : public EventLoopTest {
     outage_plan->stop_node(1, OUTAGE_PLAN_DELAY);
   }
 
-  void query_on_threads(Session* session) {
+  void query_on_threads(Session* session, bool is_chaotic = false) {
     uv_thread_t threads[NUM_THREADS];
     for (int i = 0; i < NUM_THREADS; ++i) {
-      ASSERT_EQ(0, uv_thread_create(&threads[i], query, session));
+      if (is_chaotic) {
+        ASSERT_EQ(0, uv_thread_create(&threads[i], query_is_chaotic, session));
+      } else {
+        ASSERT_EQ(0, uv_thread_create(&threads[i], query, session));
+      }
     }
     for (int i = 0; i < NUM_THREADS; ++i) {
       uv_thread_join(&threads[i]);
@@ -73,7 +77,7 @@ class SessionUnitTest : public EventLoopTest {
     for (size_t i = 1; i <= num_nodes; ++i) {
       OStringStream ss;
      ss << "127.0.0."
<< i; - config.contact_points().push_back(ss.str()); + config.contact_points().push_back(Address(ss.str(), 9042)); } if (ssl_context) { config.set_ssl_context(ssl_context); @@ -89,14 +93,21 @@ class SessionUnitTest : public EventLoopTest { << cass_error_desc(close_future->error()->code) << ": " << close_future->error()->message; } - static void query(Session* session) { + static void query(Session* session, bool is_chaotic = false) { QueryRequest::Ptr request(new QueryRequest("blah", 0)); request->set_is_idempotent(true); Future::Ptr future = session->execute(request, NULL); ASSERT_TRUE(future->wait_for(WAIT_FOR_TIME)) << "Timed out executing query"; - ASSERT_FALSE(future->error()) << cass_error_desc(future->error()->code) << ": " - << future->error()->message; + if (future->error()) fprintf(stderr, "%s\n", cass_error_desc(future->error()->code)); + if (is_chaotic) { + ASSERT_TRUE(future->error() == NULL || + future->error()->code == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE) + << cass_error_desc(future->error()->code) << ": " << future->error()->message; + } else { + ASSERT_FALSE(future->error()) + << cass_error_desc(future->error()->code) << ": " << future->error()->message; + } } // uv_thread_create @@ -104,6 +115,25 @@ class SessionUnitTest : public EventLoopTest { Session* session = static_cast(arg); query(session); } + static void query_is_chaotic(void* arg) { + Session* session = static_cast(arg); + query(session, true); + } + + bool check_consistency(const Session& session, CassConsistency expected_consistency, + CassConsistency expected_profile_consistency) { + Config session_config = session.config(); + EXPECT_EQ(expected_consistency, session_config.consistency()); + + const ExecutionProfile::Map& profiles = session_config.profiles(); + for (ExecutionProfile::Map::const_iterator it = profiles.begin(), end = profiles.end(); + it != end; ++it) { + if (expected_profile_consistency != it->second.consistency()) { + return false; + } + } + return true; + } class HostEventFuture : public Future { public: @@ -195,6 +225,54 @@ class SessionUnitTest : public EventLoopTest { uv_mutex_t mutex_; EventQueue events_; }; + + class LocalDcClusterMetadataResolver : public ClusterMetadataResolver { + public: + LocalDcClusterMetadataResolver(const String& local_dc) + : desired_local_dc_(local_dc) {} + + private: + virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) { + resolved_contact_points_ = contact_points; + local_dc_ = desired_local_dc_; + callback_(this); + } + + virtual void internal_cancel() {} + + private: + String desired_local_dc_; + }; + + class LocalDcClusterMetadataResolverFactory : public ClusterMetadataResolverFactory { + public: + LocalDcClusterMetadataResolverFactory(const String& local_dc) + : local_dc_(local_dc) {} + + virtual ClusterMetadataResolver::Ptr new_instance(const ClusterSettings& settings) const { + return ClusterMetadataResolver::Ptr(new LocalDcClusterMetadataResolver(local_dc_)); + } + + virtual const char* name() const { return "LocalDc"; } + + private: + String local_dc_; + }; + + class SupportedDbaasOptions : public mockssandra::Action { + public: + virtual void on_run(mockssandra::Request* request) const { + Vector product_type; + product_type.push_back("DATASTAX_APOLLO"); + + StringMultimap supported; + supported["PRODUCT_TYPE"] = product_type; + + String body; + mockssandra::encode_string_map(supported, &body); + request->write(mockssandra::OPCODE_SUPPORTED, body); + } + }; }; TEST_F(SessionUnitTest, ExecuteQueryNotConnected) { @@ -216,7 
+294,7 @@ TEST_F(SessionUnitTest, InvalidKeyspace) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); Session session; Future::Ptr connect_future(session.connect(config, "invalid")); @@ -231,7 +309,7 @@ TEST_F(SessionUnitTest, InvalidDataCenter) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); config.set_load_balancing_policy(new DCAwarePolicy("invalid_data_center", 0, false)); Session session; @@ -248,7 +326,7 @@ TEST_F(SessionUnitTest, InvalidLocalAddress) { Config config; config.set_local_address(Address("1.1.1.1", PORT)); // Invalid - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); config.set_load_balancing_policy(new DCAwarePolicy("invalid_data_center", 0, false)); Session session; @@ -295,7 +373,7 @@ TEST_F(SessionUnitTest, ExecuteQueryReusingSessionChaotic) { Future::Ptr outage_future = execute_outage_plan(&outage_plan); while (!outage_future->wait_for(1000)) { // 1 millisecond wait connect(&session, NULL, WAIT_FOR_TIME * 3, 4); - query(&session); + query(&session, true); close(&session, WAIT_FOR_TIME * 3); } } @@ -312,7 +390,7 @@ TEST_F(SessionUnitTest, ExecuteQueryReusingSessionUsingSslChaotic) { Future::Ptr outage_future = execute_outage_plan(&outage_plan); while (!outage_future->wait_for(1000)) { // 1 millisecond wait connect(&session, ssl_context.get(), WAIT_FOR_TIME * 3, 4); - query(&session); + query(&session, true); close(&session, WAIT_FOR_TIME * 3); } } @@ -360,7 +438,8 @@ TEST_F(SessionUnitTest, ExecuteQueryWithCompleteOutageSpinDown) { QueryRequest::Ptr request(new QueryRequest("blah", 0)); Future::Ptr future = session.execute(request, NULL); ASSERT_TRUE(future->wait_for(WAIT_FOR_TIME)); - ASSERT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, future->error()->code); + EXPECT_TRUE(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE == future->error()->code || + CASS_ERROR_LIB_REQUEST_TIMED_OUT == future->error()->code); // Restart a node and execute query to ensure session recovers ASSERT_EQ(cluster.start(2), 0); @@ -403,7 +482,7 @@ TEST_F(SessionUnitTest, ExecuteQueryWithThreadsChaotic) { Future::Ptr outage_future = execute_outage_plan(&outage_plan); while (!outage_future->wait_for(1000)) { // 1 millisecond wait - query_on_threads(&session); + query_on_threads(&session, true); } close(&session); @@ -422,7 +501,7 @@ TEST_F(SessionUnitTest, ExecuteQueryWithThreadsUsingSslChaotic) { Future::Ptr outage_future = execute_outage_plan(&outage_plan); while (!outage_future->wait_for(1000)) { // 1 millisecond wait - query_on_threads(&session); + query_on_threads(&session, true); } close(&session); @@ -436,7 +515,7 @@ TEST_F(SessionUnitTest, HostListener) { Config config; config.set_constant_reconnect(100); // Reconnect immediately - config.contact_points().push_back("127.0.0.2"); + config.contact_points().push_back(Address("127.0.0.2", 9042)); config.set_host_listener(listener); Session session; @@ -494,7 +573,7 @@ TEST_F(SessionUnitTest, HostListenerDCAwareLocal) { Config config; config.set_constant_reconnect(100); // Reconnect immediately - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); config.set_host_listener(listener); Session session; @@ -531,7 +610,7 @@ TEST_F(SessionUnitTest, HostListenerDCAwareRemote) { Config config; 
config.set_constant_reconnect(100); // Reconnect immediately - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); config.set_load_balancing_policy(new DCAwarePolicy("dc1", 1, false)); config.set_host_listener(listener); @@ -573,7 +652,7 @@ TEST_F(SessionUnitTest, HostListenerNodeDown) { Config config; config.set_constant_reconnect(100); // Reconnect immediately - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); config.set_host_listener(listener); Session session; @@ -609,3 +688,298 @@ TEST_F(SessionUnitTest, HostListenerNodeDown) { ASSERT_EQ(0u, listener->event_count()); } + +TEST_F(SessionUnitTest, LocalDcUpdatedOnPolicy) { + mockssandra::SimpleCluster cluster(simple(), 3, 1); + ASSERT_EQ(cluster.start_all(), 0); + + TestHostListener::Ptr listener(new TestHostListener()); + + Config config; + config.contact_points().push_back(Address("127.0.0.4", 9042)); + config.set_cluster_metadata_resolver_factory( + ClusterMetadataResolverFactory::Ptr(new LocalDcClusterMetadataResolverFactory("dc2"))); + config.set_host_listener(listener); + + Session session; + connect(config, &session); + + { // Initial nodes available from peers table (should skip DC1) + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.4", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.4", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + } + + for (int i = 0; i < 20; ++i) { // Validate the request processors are using DC2 only + QueryRequest::Ptr request(new QueryRequest("blah", 0)); + + ResponseFuture::Ptr future = session.execute(request, NULL); + EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME)); + EXPECT_FALSE(future->error()); + EXPECT_EQ("127.0.0.4", future->address().to_string()); + } + + close(&session); + + ASSERT_EQ(0u, listener->event_count()); +} + +TEST_F(SessionUnitTest, LocalDcNotOverriddenOnPolicy) { + mockssandra::SimpleCluster cluster(simple(), 1, 3); + ASSERT_EQ(cluster.start_all(), 0); + + TestHostListener::Ptr listener(new TestHostListener()); + + Config config; + config.contact_points().push_back(Address("127.0.0.1", 9042)); + config.set_load_balancing_policy(new DCAwarePolicy("dc1")); + config.set_cluster_metadata_resolver_factory( + ClusterMetadataResolverFactory::Ptr(new LocalDcClusterMetadataResolverFactory("dc2"))); + config.set_host_listener(listener); + + Session session; + connect(config, &session); + + { // Initial nodes available from peers table (should be DC1) + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.1", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.1", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + } + + for (int i = 0; i < 20; ++i) { // Validate the request processors are using DC1 only + QueryRequest::Ptr request(new QueryRequest("blah", 0)); + + ResponseFuture::Ptr future = session.execute(request, NULL); + EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME)); + EXPECT_FALSE(future->error()); + EXPECT_EQ("127.0.0.1", future->address().to_string()); + } + + close(&session); + + ASSERT_EQ(0u, listener->event_count()); +} + +TEST_F(SessionUnitTest, LocalDcOverriddenOnPolicyUsingExecutionProfiles) { + mockssandra::SimpleCluster cluster(simple(), 3, 1); + ASSERT_EQ(cluster.start_all(), 0); + + TestHostListener::Ptr listener(new 
TestHostListener()); + + Config config; + config.contact_points().push_back(Address("127.0.0.4", 9042)); + config.set_use_randomized_contact_points( + false); // Ensure round robin order over DC for query execution + config.set_cluster_metadata_resolver_factory( + ClusterMetadataResolverFactory::Ptr(new LocalDcClusterMetadataResolverFactory("dc2"))); + config.set_host_listener(listener); + + ExecutionProfile profile; + profile.set_load_balancing_policy(new DCAwarePolicy()); + config.set_execution_profile("use_propagated_local_dc", &profile); + + Session session; + connect(config, &session); + + { // Initial nodes available from peers table (should be DC2) + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.4", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.4", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + } + + for (int i = 0; i < 20; ++i) { // Validate the default profile is using DC2 only + QueryRequest::Ptr request(new QueryRequest("blah", 0)); + + ResponseFuture::Ptr future = session.execute(request, NULL); + EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME)); + EXPECT_FALSE(future->error()); + EXPECT_EQ("127.0.0.4", future->address().to_string()); + } + + for (int i = 0; i < 20; ++i) { // Validate the default profile is using DC2 only + QueryRequest::Ptr request(new QueryRequest("blah", 0)); + request->set_execution_profile_name("use_propagated_local_dc"); + + ResponseFuture::Ptr future = session.execute(request, NULL); + EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME)); + EXPECT_FALSE(future->error()); + EXPECT_EQ("127.0.0.4", future->address().to_string()); + } + + close(&session); + + ASSERT_EQ(0u, listener->event_count()); +} + +TEST_F(SessionUnitTest, LocalDcNotOverriddenOnPolicyUsingExecutionProfiles) { + mockssandra::SimpleCluster cluster(simple(), 3, 1); + ASSERT_EQ(cluster.start_all(), 0); + + TestHostListener::Ptr listener(new TestHostListener()); + + Config config; + config.contact_points().push_back(Address("127.0.0.4", 9042)); + config.set_use_randomized_contact_points( + false); // Ensure round robin order over DC for query execution + config.set_cluster_metadata_resolver_factory( + ClusterMetadataResolverFactory::Ptr(new LocalDcClusterMetadataResolverFactory("dc2"))); + config.set_host_listener(listener); + + ExecutionProfile profile; + profile.set_load_balancing_policy(new DCAwarePolicy("dc1")); + config.set_execution_profile("use_dc1", &profile); + + Session session; + connect(config, &session); + + { // Initial nodes available from peers table (should be DC1 and DC2) + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.1", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.1", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.2", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.2", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.3", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.3", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::ADD_NODE, Address("127.0.0.4", 9042)), + 
listener->wait_for_event(WAIT_FOR_TIME)); + EXPECT_EQ(HostEventFuture::Event(HostEventFuture::START_NODE, Address("127.0.0.4", 9042)), + listener->wait_for_event(WAIT_FOR_TIME)); + } + + for (int i = 0; i < 20; ++i) { // Validate the default profile is using DC2 only + QueryRequest::Ptr request(new QueryRequest("blah", 0)); + + ResponseFuture::Ptr future = session.execute(request, NULL); + EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME)); + EXPECT_FALSE(future->error()); + EXPECT_EQ("127.0.0.4", future->address().to_string()); + } + + for (int i = 0; i < 20; ++i) { // Validate the default profile is using DC1 only + QueryRequest::Ptr request(new QueryRequest("blah", 0)); + request->set_execution_profile_name("use_dc1"); + + ResponseFuture::Ptr future = session.execute(request, NULL); + EXPECT_TRUE(future->wait_for(WAIT_FOR_TIME)); + EXPECT_FALSE(future->error()); + EXPECT_NE("127.0.0.4", future->address().to_string()); + } + + close(&session); + + ASSERT_EQ(0u, listener->event_count()); +} + +TEST_F(SessionUnitTest, NoContactPoints) { + // No cluster needed + + Config config; + config.contact_points().clear(); + + Session session; + Future::Ptr connect_future(session.connect(config)); + ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME)) + << "Timed out waiting for session to connect"; + ASSERT_TRUE(connect_future->error()); + EXPECT_EQ(connect_future->error()->code, CASS_ERROR_LIB_NO_HOSTS_AVAILABLE); +} + +TEST_F(SessionUnitTest, DefaultConsistency) { + mockssandra::SimpleCluster cluster(simple()); + ASSERT_EQ(cluster.start_all(), 0); + + Session session; + { + Config session_config = session.config(); + EXPECT_EQ(CASS_CONSISTENCY_UNKNOWN, session_config.consistency()); + } + + ExecutionProfile profile; + Config config; + config.contact_points().push_back(Address("127.0.0.1", 9042)); + config.set_execution_profile("profile", &profile); + connect(config, &session); + + EXPECT_TRUE(check_consistency(session, CASS_DEFAULT_CONSISTENCY, CASS_DEFAULT_CONSISTENCY)); + + close(&session); +} + +TEST_F(SessionUnitTest, DefaultConsistencyExecutionProfileNotUpdated) { + mockssandra::SimpleCluster cluster(simple()); + ASSERT_EQ(cluster.start_all(), 0); + + Session session; + { + Config session_config = session.config(); + EXPECT_EQ(CASS_CONSISTENCY_UNKNOWN, session_config.consistency()); + } + + ExecutionProfile profile; + profile.set_consistency(CASS_CONSISTENCY_LOCAL_QUORUM); + Config config; + config.contact_points().push_back(Address("127.0.0.1", 9042)); + config.set_execution_profile("profile", &profile); + connect(config, &session); + + EXPECT_TRUE(check_consistency(session, CASS_DEFAULT_CONSISTENCY, CASS_CONSISTENCY_LOCAL_QUORUM)); + + close(&session); +} + +TEST_F(SessionUnitTest, DbaasDetectionUpdateDefaultConsistency) { + mockssandra::SimpleRequestHandlerBuilder builder; + builder.on(mockssandra::OPCODE_OPTIONS).execute(new SupportedDbaasOptions()); + mockssandra::SimpleCluster cluster(builder.build()); + ASSERT_EQ(cluster.start_all(), 0); + + Session session; + { + Config session_config = session.config(); + EXPECT_EQ(CASS_CONSISTENCY_UNKNOWN, session_config.consistency()); + } + + ExecutionProfile profile; + Config config; + config.contact_points().push_back(Address("127.0.0.1", 9042)); + config.set_execution_profile("profile", &profile); + connect(config, &session); + + EXPECT_TRUE( + check_consistency(session, CASS_DEFAULT_DBAAS_CONSISTENCY, CASS_DEFAULT_DBAAS_CONSISTENCY)); + + close(&session); +} + +TEST_F(SessionUnitTest, DbaasDefaultConsistencyExecutionProfileNotUpdate) { + 
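// DBaaS detection should only change the session-wide default: a profile that sets an
// explicit consistency (LOCAL_ONE here) keeps it, while the session default becomes
// CASS_DEFAULT_DBAAS_CONSISTENCY.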
mockssandra::SimpleRequestHandlerBuilder builder; + builder.on(mockssandra::OPCODE_OPTIONS).execute(new SupportedDbaasOptions()); + mockssandra::SimpleCluster cluster(builder.build()); + ASSERT_EQ(cluster.start_all(), 0); + + Session session; + { + Config session_config = session.config(); + EXPECT_EQ(CASS_CONSISTENCY_UNKNOWN, session_config.consistency()); + } + + ExecutionProfile profile; + profile.set_consistency(CASS_CONSISTENCY_LOCAL_ONE); + Config config; + config.contact_points().push_back(Address("127.0.0.1", 9042)); + config.set_execution_profile("profile", &profile); + connect(config, &session); + + EXPECT_TRUE( + check_consistency(session, CASS_DEFAULT_DBAAS_CONSISTENCY, CASS_CONSISTENCY_LOCAL_ONE)); + + close(&session); +} diff --git a/gtests/src/unit/tests/test_session_base.cpp b/gtests/src/unit/tests/test_session_base.cpp index 031bec368..2a1820b70 100644 --- a/gtests/src/unit/tests/test_session_base.cpp +++ b/gtests/src/unit/tests/test_session_base.cpp @@ -44,25 +44,27 @@ class TestSessionBase : public SessionBase { protected: virtual void on_connect(const Host::Ptr& connected_host, ProtocolVersion protocol_version, - const HostMap& hosts, const TokenMap::Ptr& token_map) { + const HostMap& hosts, const TokenMap::Ptr& token_map, + const String& local_dc) { ++connected_; - ASSERT_STREQ("127.0.0.1", connected_host->address_string().c_str()); - ASSERT_EQ(ProtocolVersion(PROTOCOL_VERSION), protocol_version); - ASSERT_EQ(1u, hosts.size()); - ASSERT_EQ(state(), SESSION_STATE_CONNECTING); + + EXPECT_STREQ("127.0.0.1", connected_host->address_string().c_str()); + EXPECT_EQ(ProtocolVersion(PROTOCOL_VERSION), protocol_version); + EXPECT_EQ(1u, hosts.size()); + EXPECT_EQ(state(), SESSION_STATE_CONNECTING); notify_connected(); } virtual void on_connect_failed(CassError code, const String& message) { ++failed_; - ASSERT_EQ(state(), SESSION_STATE_CONNECTING); + EXPECT_EQ(state(), SESSION_STATE_CONNECTING); notify_connect_failed(code, message); - ASSERT_EQ(state(), SESSION_STATE_CLOSED); + EXPECT_EQ(state(), SESSION_STATE_CLOSED); } virtual void on_close() { ++closed_; - ASSERT_EQ(state(), SESSION_STATE_CLOSING); + EXPECT_EQ(state(), SESSION_STATE_CLOSING); notify_closed(); } @@ -79,7 +81,7 @@ TEST_F(SessionBaseUnitTest, Simple) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); TestSessionBase session_base; Future::Ptr connect_future(session_base.connect(config, KEYSPACE)); @@ -103,7 +105,7 @@ TEST_F(SessionBaseUnitTest, SimpleEmptyKeyspaceWithoutRandom) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); config.set_use_randomized_contact_points(false); TestSessionBase session_base; @@ -129,7 +131,7 @@ TEST_F(SessionBaseUnitTest, Ssl) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); config.set_ssl_context(settings.socket_settings.ssl_context.get()); TestSessionBase session_base; @@ -155,8 +157,8 @@ TEST_F(SessionBaseUnitTest, SimpleInvalidContactPointsIp) { Config config; config.set_use_randomized_contact_points(false); - config.contact_points().push_back("123.456.789.012"); - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("123.456.789.012", 9042)); + 
config.contact_points().push_back(Address("127.0.0.1", 9042)); TestSessionBase session_base; Future::Ptr connect_future(session_base.connect(config, KEYSPACE)); @@ -179,8 +181,8 @@ TEST_F(SessionBaseUnitTest, SimpleInvalidContactPointsHostname) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("doesnotexist.dne"); - config.contact_points().push_back("localhost"); + config.contact_points().push_back(Address("doesnotexist.dne", 9042)); + config.contact_points().push_back(Address("localhost", 9042)); TestSessionBase session_base; Future::Ptr connect_future(session_base.connect(config, KEYSPACE)); @@ -205,7 +207,7 @@ TEST_F(SessionBaseUnitTest, InvalidProtocol) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); TestSessionBase session_base; Future::Ptr connect_future(session_base.connect(config, KEYSPACE)); @@ -216,6 +218,25 @@ TEST_F(SessionBaseUnitTest, InvalidProtocol) { EXPECT_EQ(0, session_base.closed()); } +TEST_F(SessionBaseUnitTest, UnsupportedProtocol) { + mockssandra::SimpleCluster cluster(simple()); + ASSERT_EQ(cluster.start_all(), 0); + + Config config; + config.set_protocol_version(ProtocolVersion(2)); // Unsupported protocol version + config.contact_points().push_back(Address("127.0.0.1", 9042)); + TestSessionBase session_base; + + Future::Ptr connect_future(session_base.connect(config, KEYSPACE)); + ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME)); + EXPECT_EQ(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, connect_future->error()->code); + EXPECT_TRUE(connect_future->error()->message.find( + "Operation unsupported by this protocol version") != String::npos); + EXPECT_EQ(0, session_base.connected()); + EXPECT_EQ(1, session_base.failed()); + EXPECT_EQ(0, session_base.closed()); +} + TEST_F(SessionBaseUnitTest, SslError) { mockssandra::SimpleCluster cluster(simple()); use_ssl(&cluster); @@ -224,7 +245,7 @@ TEST_F(SessionBaseUnitTest, SslError) { SslContext::Ptr invalid_ssl_context(SslContextFactory::create()); invalid_ssl_context->set_verify_flags(CASS_SSL_VERIFY_PEER_CERT); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); config.set_ssl_context(invalid_ssl_context.get()); TestSessionBase session_base; @@ -241,7 +262,7 @@ TEST_F(SessionBaseUnitTest, Auth) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); config.set_credentials("cassandra", "cassandra"); TestSessionBase session_base; @@ -263,7 +284,7 @@ TEST_F(SessionBaseUnitTest, BadCredentials) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); TestSessionBase session_base; Future::Ptr connect_future(session_base.connect(config, KEYSPACE)); @@ -276,7 +297,7 @@ TEST_F(SessionBaseUnitTest, BadCredentials) { TEST_F(SessionBaseUnitTest, NoHostsAvailable) { Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); TestSessionBase session_base; Future::Ptr connect_future(session_base.connect(config, KEYSPACE)); @@ -292,7 +313,7 @@ TEST_F(SessionBaseUnitTest, ConnectWhenAlreadyConnected) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + 
config.contact_points().push_back(Address("127.0.0.1", 9042)); TestSessionBase session_base; { @@ -323,7 +344,7 @@ TEST_F(SessionBaseUnitTest, CloseWhenAlreadyClosed) { ASSERT_EQ(cluster.start_all(), 0); Config config; - config.contact_points().push_back("127.0.0.1"); + config.contact_points().push_back(Address("127.0.0.1", 9042)); TestSessionBase session_base; Future::Ptr connect_future(session_base.connect(config, KEYSPACE)); diff --git a/gtests/src/unit/tests/test_socket.cpp b/gtests/src/unit/tests/test_socket.cpp index 26f77d31a..176bdbd91 100644 --- a/gtests/src/unit/tests/test_socket.cpp +++ b/gtests/src/unit/tests/test_socket.cpp @@ -20,9 +20,56 @@ #include "socket_connector.hpp" #include "ssl.hpp" -#define SSL_VERIFY_PEER_DNS_RELATIVE_HOSTNAME "cpp-driver.hostname" -#define SSL_VERIFY_PEER_DNS_ABSOLUTE_HOSTNAME SSL_VERIFY_PEER_DNS_RELATIVE_HOSTNAME "." -#define SSL_VERIFY_PEER_DNS_IP_ADDRESS "127.254.254.254" +#define DNS_HOSTNAME "cpp-driver.hostname." +#define DNS_IP_ADDRESS "127.254.254.254" + +using mockssandra::internal::ClientConnection; +using mockssandra::internal::ClientConnectionFactory; +using mockssandra::internal::ServerConnection; + +class CloseConnection : public ClientConnection { +public: + CloseConnection(ServerConnection* server) + : ClientConnection(server) {} + + virtual int on_accept() { + int rc = accept(); + if (rc != 0) { + return rc; + } + close(); + return rc; + } +}; + +class CloseConnectionFactory : public ClientConnectionFactory { +public: + virtual ClientConnection* create(ServerConnection* server) const { + return new CloseConnection(server); + } +}; + +class SniServerNameConnection : public ClientConnection { +public: + SniServerNameConnection(ServerConnection* server) + : ClientConnection(server) {} + + virtual void on_read(const char* data, size_t len) { + const char* server_name = sni_server_name(); + if (server_name) { + write(String(server_name) + " - Closed"); + } else { + write(" - Closed"); + } + } +}; + +class SniServerNameConnectionFactory : public ClientConnectionFactory { +public: + virtual ClientConnection* create(ServerConnection* server) const { + return new SniServerNameConnection(server); + } +}; using namespace datastax; using namespace datastax::internal; @@ -88,19 +135,27 @@ class SocketUnitTest : public LoopTest { return settings; } - void listen() { ASSERT_EQ(server_.listen(), 0); } - - void reset(const Address& address) { server_.reset(address); } + void listen(const Address& address = Address("127.0.0.1", 8888)) { + ASSERT_EQ(server_.listen(address), 0); + } void close() { server_.close(); } - void use_close_immediately() { server_.use_close_immediately(); } + void use_close_immediately() { server_.use_connection_factory(new CloseConnectionFactory()); } + void use_sni_server_name() { + server_.use_connection_factory(new SniServerNameConnectionFactory()); + } virtual void TearDown() { LoopTest::TearDown(); close(); } + bool verify_dns() { + verify_dns_check(); // Verify address can be resolved + return !HasFailure(); + } + static void on_socket_connected(SocketConnector* connector, String* result) { Socket::Ptr socket = connector->release_socket(); if (connector->error_code() == SocketConnector::SOCKET_OK) { @@ -136,16 +191,32 @@ class SocketUnitTest : public LoopTest { } } - static void on_request(uv_getnameinfo_t* handle, int status, const char* hostname, - const char* service) { + static void on_request(uv_getaddrinfo_t* handle, int status, struct addrinfo* res) { if (status) { - FAIL() << "Unable to Execute Test 
SocketUnitTest.SslVerifyIdentityDns: " - << "Add /etc/hosts entry " << SSL_VERIFY_PEER_DNS_IP_ADDRESS << "\t" - << SSL_VERIFY_PEER_DNS_ABSOLUTE_HOSTNAME; - } else if (String(hostname) != String(SSL_VERIFY_PEER_DNS_ABSOLUTE_HOSTNAME)) { - FAIL() << "Invalid /etc/hosts entry for: '" << hostname << "' != '" - << SSL_VERIFY_PEER_DNS_ABSOLUTE_HOSTNAME << "'"; + FAIL() << "Unable to Execute Test: " + << "Add /etc/hosts entry " << DNS_IP_ADDRESS << "\t" << DNS_HOSTNAME; + } else { + bool match = false; + do { + Address address(res->ai_addr); + if (address.is_valid_and_resolved() && address == Address(DNS_IP_ADDRESS, 8888)) { + match = true; + break; + } + res = res->ai_next; + } while (res); + ASSERT_TRUE(match) << "Invalid /etc/hosts entry for: '" << DNS_HOSTNAME << "' != '" + << DNS_IP_ADDRESS << "'"; } + uv_freeaddrinfo(res); + } + +private: + void verify_dns_check() { + uv_getaddrinfo_t request; + Address::SocketStorage storage; + ASSERT_EQ(0, uv_getaddrinfo(loop(), &request, on_request, DNS_HOSTNAME, "8888", NULL)); + uv_run(loop(), UV_RUN_DEFAULT); } private: @@ -166,11 +237,27 @@ TEST_F(SocketUnitTest, Simple) { EXPECT_EQ(result, "The socket is successfully connected and wrote data - Closed"); } -TEST_F(SocketUnitTest, Ssl) { - listen(); +TEST_F(SocketUnitTest, SimpleDns) { + if (!verify_dns()) return; + + listen(Address(DNS_IP_ADDRESS, 8888)); + + String result; + SocketConnector::Ptr connector(new SocketConnector(Address(DNS_HOSTNAME, 8888), + bind_callback(on_socket_connected, &result))); + connector->connect(loop()); + + uv_run(loop(), UV_RUN_DEFAULT); + + EXPECT_EQ(result, "The socket is successfully connected and wrote data - Closed"); +} + +TEST_F(SocketUnitTest, Ssl) { SocketSettings settings(use_ssl()); + listen(); + String result; SocketConnector::Ptr connector( new SocketConnector(Address("127.0.0.1", 8888), bind_callback(on_socket_connected, &result))); @@ -182,6 +269,24 @@ TEST_F(SocketUnitTest, Ssl) { EXPECT_EQ(result, "The socket is successfully connected and wrote data - Closed"); } +TEST_F(SocketUnitTest, SslSniServerName) { + SocketSettings settings(use_ssl()); + + use_sni_server_name(); + listen(); + + String result; + SocketConnector::Ptr connector( + new SocketConnector(Address("127.0.0.1", 8888, "TestSniServerName"), + bind_callback(on_socket_connected, &result))); + + connector->with_settings(settings)->connect(loop()); + + uv_run(loop(), UV_RUN_DEFAULT); + + EXPECT_EQ(result, "TestSniServerName - Closed"); +} + TEST_F(SocketUnitTest, Refused) { bool is_refused = false; SocketConnector::Ptr connector(new SocketConnector( @@ -194,11 +299,11 @@ TEST_F(SocketUnitTest, Refused) { } TEST_F(SocketUnitTest, SslClose) { + SocketSettings settings(use_ssl()); + use_close_immediately(); listen(); - SocketSettings settings(use_ssl()); - Vector connectors; bool is_closed = false; @@ -241,10 +346,10 @@ TEST_F(SocketUnitTest, Cancel) { } TEST_F(SocketUnitTest, SslCancel) { - listen(); - SocketSettings settings(use_ssl()); + listen(); + Vector connectors; bool is_canceled = false; @@ -268,9 +373,10 @@ TEST_F(SocketUnitTest, SslCancel) { } TEST_F(SocketUnitTest, SslVerifyIdentity) { + SocketSettings settings(use_ssl("127.0.0.1")); + listen(); - SocketSettings settings(use_ssl("127.0.0.1")); settings.ssl_context->set_verify_flags(CASS_SSL_VERIFY_PEER_IDENTITY); String result; @@ -285,26 +391,17 @@ TEST_F(SocketUnitTest, SslVerifyIdentity) { } TEST_F(SocketUnitTest, SslVerifyIdentityDns) { - // Verify address can be resolved - Address verify_entry; - 
Address::from_string(SSL_VERIFY_PEER_DNS_IP_ADDRESS, 8888, &verify_entry); - uv_getnameinfo_t request; - ASSERT_EQ(0, uv_getnameinfo(loop(), &request, on_request, - static_cast(verify_entry).addr(), 0)); - uv_run(loop(), UV_RUN_DEFAULT); - if (this->HasFailure()) { // Make test fail due to DNS not configured - return; - } + if (!verify_dns()) return; - reset(Address(SSL_VERIFY_PEER_DNS_IP_ADDRESS, - 8888)); // Ensure the echo server is listening on the correct address - listen(); + SocketSettings settings(use_ssl(DNS_HOSTNAME)); + + listen(Address(DNS_IP_ADDRESS, 8888)); - SocketSettings settings(use_ssl(SSL_VERIFY_PEER_DNS_RELATIVE_HOSTNAME)); settings.ssl_context->set_verify_flags(CASS_SSL_VERIFY_PEER_IDENTITY_DNS); + settings.resolve_timeout_ms = 12000; String result; - SocketConnector::Ptr connector(new SocketConnector(Address(SSL_VERIFY_PEER_DNS_IP_ADDRESS, 8888), + SocketConnector::Ptr connector(new SocketConnector(Address(DNS_HOSTNAME, 8888), bind_callback(on_socket_connected, &result))); connector->with_settings(settings)->connect(loop()); diff --git a/gtests/src/unit/tests/test_startup_options.cpp b/gtests/src/unit/tests/test_startup_options.cpp index 5288bdc2a..cffc77178 100644 --- a/gtests/src/unit/tests/test_startup_options.cpp +++ b/gtests/src/unit/tests/test_startup_options.cpp @@ -56,7 +56,7 @@ class StartupRequestUnitTest : public Unit { } void connect() { - config_.contact_points().push_back("127.0.0.1"); + config_.contact_points().push_back(Address("127.0.0.1", 9042)); internal::core::Future::Ptr connect_future(session_.connect(config_)); ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME)) << "Timed out waiting for session to connect"; diff --git a/gtests/src/unit/tests/test_statement.cpp b/gtests/src/unit/tests/test_statement.cpp index 7abd83a9c..891aac959 100644 --- a/gtests/src/unit/tests/test_statement.cpp +++ b/gtests/src/unit/tests/test_statement.cpp @@ -16,6 +16,7 @@ #include "unit.hpp" +#include "batch_request.hpp" #include "constants.hpp" #include "control_connection.hpp" #include "query_request.hpp" @@ -32,8 +33,9 @@ class StatementUnitTest : public Unit { void connect(const Config& config = Config()) { Config temp(config); - temp.contact_points().push_back("127.0.0.1"); - temp.contact_points().push_back("127.0.0.2"); // At least one more host (in case node 1 is down) + temp.contact_points().push_back(Address("127.0.0.1", 9042)); + temp.contact_points().push_back( + Address("127.0.0.2", 9042)); // At least one more host (in case node 1 is down) Future::Ptr connect_future(session.connect(temp)); ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME)) << "Timed out waiting for session to connect"; @@ -54,7 +56,8 @@ class StatementUnitTest : public Unit { CassInet inet; ASSERT_TRUE(value->decoder().as_inet(value->size(), &inet)); - ASSERT_TRUE(Address::from_inet(inet.address, inet.address_length, 9042, output)); + *output = Address(inet.address, inet.address_length, 9042); + ASSERT_TRUE(output->is_valid_and_resolved()); } Session session; @@ -121,3 +124,45 @@ TEST_F(StatementUnitTest, SetHostWhereHostIsDown) { ASSERT_TRUE(future->error()); EXPECT_EQ(future->error()->code, CASS_ERROR_LIB_NO_HOSTS_AVAILABLE); } + +TEST_F(StatementUnitTest, ErrorBatchWithNamedParameters) { + mockssandra::SimpleCluster cluster(simple(), 1); + ASSERT_EQ(cluster.start_all(), 0); + + connect(); + + BatchRequest::Ptr batch(new BatchRequest(CASS_BATCH_TYPE_UNLOGGED)); + + Statement::Ptr request(new QueryRequest("SELECT * FROM does_not_matter WHERE key = ?", + 1)); // Space for a named 
parameter + + request->set("key", 42); // Use named parameters + + batch->add_statement(request.get()); + + ResponseFuture::Ptr future(session.execute(Request::ConstPtr(batch))); + future->wait(); + + ASSERT_TRUE(future->error()); + EXPECT_EQ(future->error()->code, CASS_ERROR_LIB_BAD_PARAMS); + EXPECT_EQ(future->error()->message, "Batches cannot contain queries with named values"); +} + +TEST_F(StatementUnitTest, ErrorParametersUnset) { + mockssandra::SimpleCluster cluster(simple(), 1); + ASSERT_EQ(cluster.start_all(), 0); + + Config config; + config.set_protocol_version(ProtocolVersion(3)); + + connect(config); + + Statement::Ptr request(new QueryRequest("SELECT * FROM does_not_matter WHERE key = ?", + 1)); // Parameters start as unset + + ResponseFuture::Ptr future(session.execute(Request::ConstPtr(request))); + future->wait(); + + ASSERT_TRUE(future->error()); + EXPECT_EQ(future->error()->code, CASS_ERROR_LIB_PARAMETER_UNSET); +} diff --git a/gtests/src/unit/tests/test_supported_response.cpp b/gtests/src/unit/tests/test_supported_response.cpp new file mode 100644 index 000000000..16911e498 --- /dev/null +++ b/gtests/src/unit/tests/test_supported_response.cpp @@ -0,0 +1,131 @@ +/* + Copyright (c) DataStax, Inc. + + This software can be used solely with DataStax Enterprise. Please consult the + license at http://www.datastax.com/terms/datastax-dse-driver-license-terms +*/ + +#include "loop_test.hpp" + +#include "options_request.hpp" +#include "request_callback.hpp" +#include "supported_response.hpp" + +using namespace datastax; +using namespace datastax::internal; +using namespace datastax::internal::core; + +class SupportedResponseUnitTest : public LoopTest { +public: + const mockssandra::RequestHandler* simple_cluster_with_options() { + mockssandra::SimpleRequestHandlerBuilder builder; + builder.on(mockssandra::OPCODE_OPTIONS).execute(new SupportedOptions()); + return builder.build(); + } + +public: + static void on_connect(Connector* connector, StringMultimap* supported_options) { + ASSERT_TRUE(connector->is_ok()); + *supported_options = connector->supported_options(); + } + +private: + class SupportedOptions : public mockssandra::Action { + public: + virtual void on_run(mockssandra::Request* request) const { + Vector compression; + Vector cql_version; + Vector protocol_versions; + compression.push_back("snappy"); + compression.push_back("lz4"); + cql_version.push_back("3.4.5"); + protocol_versions.push_back("3/v3"); + protocol_versions.push_back("4/v4"); + + StringMultimap supported; + supported["COMPRESSION"] = compression; + supported["CQL_VERSION"] = cql_version; + supported["PROTOCOL_VERSIONS"] = protocol_versions; + + String body; + mockssandra::encode_string_map(supported, &body); + request->write(mockssandra::OPCODE_SUPPORTED, body); + } + }; +}; + +TEST_F(SupportedResponseUnitTest, Simple) { + mockssandra::SimpleCluster cluster(simple_cluster_with_options()); + ASSERT_EQ(cluster.start_all(), 0); + + StringMultimap supported_options; + ASSERT_EQ(0, supported_options.size()); + Connector::Ptr connector(new Connector(Host::Ptr(new Host(Address("127.0.0.1", PORT))), + PROTOCOL_VERSION, + bind_callback(on_connect, &supported_options))); + connector->connect(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + + ASSERT_EQ(3u, supported_options.size()); + { + Vector compression = supported_options.find("COMPRESSION")->second; + ASSERT_EQ(2u, compression.size()); + EXPECT_EQ("snappy", compression[0]); + EXPECT_EQ("lz4", compression[1]); + } + { + Vector cql_version = 
supported_options.find("CQL_VERSION")->second; + ASSERT_EQ(1u, cql_version.size()); + EXPECT_EQ("3.4.5", cql_version[0]); + } + { + Vector protocol_versions = supported_options.find("PROTOCOL_VERSIONS")->second; + ASSERT_EQ(2u, protocol_versions.size()); + EXPECT_EQ("3/v3", protocol_versions[0]); + EXPECT_EQ("4/v4", protocol_versions[1]); + } + + { // Non-existent key + EXPECT_EQ(supported_options.end(), supported_options.find("invalid")); + } +} + +TEST_F(SupportedResponseUnitTest, UppercaseKeysOnly) { + class CaseInsensitiveSupportedOptions : public mockssandra::Action { + public: + virtual void on_run(mockssandra::Request* request) const { + Vector camel_key; + camel_key.push_back("success"); + + StringMultimap supported; + supported["CamEL_KeY"] = camel_key; + + String body; + mockssandra::encode_string_map(supported, &body); + request->write(mockssandra::OPCODE_SUPPORTED, body); + } + }; + + mockssandra::SimpleRequestHandlerBuilder builder; + builder.on(mockssandra::OPCODE_OPTIONS).execute(new CaseInsensitiveSupportedOptions()); + mockssandra::SimpleCluster cluster(builder.build()); + ASSERT_EQ(cluster.start_all(), 0); + + StringMultimap supported_options; + ASSERT_EQ(0, supported_options.size()); + Connector::Ptr connector(new Connector(Host::Ptr(new Host(Address("127.0.0.1", PORT))), + PROTOCOL_VERSION, + bind_callback(on_connect, &supported_options))); + connector->connect(loop()); + uv_run(loop(), UV_RUN_DEFAULT); + + ASSERT_EQ(1u, supported_options.size()); + { // Uppercase + Vector uppercase = supported_options.find("CAMEL_KEY")->second; + ASSERT_EQ(1u, uppercase.size()); + EXPECT_EQ("success", uppercase[0]); + } + { // Exact key + EXPECT_EQ(supported_options.end(), supported_options.find("CamEL_KeY")); + } +} diff --git a/gtests/src/unit/tests/test_tracing.cpp b/gtests/src/unit/tests/test_tracing.cpp index 1943e0eed..5b4dc9cc4 100644 --- a/gtests/src/unit/tests/test_tracing.cpp +++ b/gtests/src/unit/tests/test_tracing.cpp @@ -31,7 +31,7 @@ class TracingUnitTest : public Unit { void connect(const Config& config = Config()) { Config temp(config); - temp.contact_points().push_back("127.0.0.1"); + temp.contact_points().push_back(Address("127.0.0.1", 9042)); Future::Ptr connect_future(session.connect(temp)); ASSERT_TRUE(connect_future->wait_for(WAIT_FOR_TIME)) << "Timed out waiting for session to connect"; diff --git a/gtests/src/unit/tests/test_value.cpp b/gtests/src/unit/tests/test_value.cpp index cce7a06ce..949645f71 100644 --- a/gtests/src/unit/tests/test_value.cpp +++ b/gtests/src/unit/tests/test_value.cpp @@ -16,12 +16,15 @@ #include +#include "buffer.hpp" #include "cassandra.h" +#include "string.hpp" #include "value.hpp" #include using namespace datastax::internal::core; +using namespace datastax; // The following CassValue's are used in tests as "bad data". 
@@ -107,3 +110,81 @@ TEST(ValueUnitTest, BadDecimal) { EXPECT_EQ(cass_value_get_decimal(CassValue::to(&invalid_value), &varint, &varint_size, &scale), CASS_ERROR_LIB_NOT_ENOUGH_DATA); } + +TEST(ValueUnitTest, NullElementInCollectionList) { + const char input[12] = { + -1, -1, -1, -1, // Element 1 is NULL + 0, 0, 0, 4, 0, 0, 0, 2 // Size (int32_t) and contents of element 2 + }; + Decoder decoder(input, 12); + DataType::ConstPtr element_data_type(new DataType(CASS_VALUE_TYPE_INT)); + CollectionType::ConstPtr data_type = CollectionType::list(element_data_type, false); + Value value(data_type, 2, decoder); + ASSERT_EQ(cass_true, cass_value_is_collection(CassValue::to(&value))); + + CassIterator* it = cass_iterator_from_collection(CassValue::to(&value)); + EXPECT_EQ(cass_true, cass_iterator_next(it)); + const CassValue* element = cass_iterator_get_value(it); + EXPECT_EQ(cass_true, cass_value_is_null(element)); + cass_int32_t element_value; + EXPECT_EQ(cass_true, cass_iterator_next(it)); + EXPECT_EQ(CASS_OK, cass_value_get_int32(element, &element_value)); + EXPECT_EQ(2, element_value); + cass_iterator_free(it); +} + +TEST(ValueUnitTest, NullElementInCollectionMap) { + const char input[21] = { + -1, -1, -1, -1, // Key 1 is NULL + 0, 0, 0, 4, 0, 0, 0, 2, // Size (int32_t) and contents of value 1 + 0, 0, 0, 1, 'a', // Key 2 is a + -1, -1, -1, -1 // Value 2 is NULL + }; + Decoder decoder(input, 21); + DataType::ConstPtr key_data_type(new DataType(CASS_VALUE_TYPE_TEXT)); + DataType::ConstPtr value_data_type(new DataType(CASS_VALUE_TYPE_INT)); + CollectionType::ConstPtr data_type = CollectionType::map(key_data_type, value_data_type, false); + Value value(data_type, 2, decoder); + ASSERT_EQ(cass_true, cass_value_is_collection(CassValue::to(&value))); + + CassIterator* it = cass_iterator_from_collection(CassValue::to(&value)); + EXPECT_EQ(cass_true, cass_iterator_next(it)); + const CassValue* element = cass_iterator_get_value(it); + EXPECT_EQ(cass_true, cass_value_is_null(element)); + cass_int32_t value_value; + EXPECT_EQ(cass_true, cass_iterator_next(it)); + EXPECT_EQ(CASS_OK, cass_value_get_int32(element, &value_value)); + EXPECT_EQ(2, value_value); + + EXPECT_EQ(cass_true, cass_iterator_next(it)); + element = cass_iterator_get_value(it); + const char* key_value = NULL; + size_t key_value_length = 0; + EXPECT_EQ(CASS_OK, cass_value_get_string(element, &key_value, &key_value_length)); + EXPECT_EQ("a", String(key_value, key_value_length)); + EXPECT_EQ(cass_true, cass_iterator_next(it)); + EXPECT_EQ(cass_true, cass_value_is_null(element)); + cass_iterator_free(it); +} + +TEST(ValueUnitTest, NullElementInCollectionSet) { + const char input[12] = { + 0, 0, 0, 4, 0, 0, 0, 2, // Size (int32_t) and contents of element 1 + -1, -1, -1, -1, // Element 2 is NULL + }; + Decoder decoder(input, 12); + DataType::ConstPtr element_data_type(new DataType(CASS_VALUE_TYPE_INT)); + CollectionType::ConstPtr data_type = CollectionType::set(element_data_type, false); + Value value(data_type, 2, decoder); + ASSERT_EQ(cass_true, cass_value_is_collection(CassValue::to(&value))); + + CassIterator* it = cass_iterator_from_collection(CassValue::to(&value)); + EXPECT_EQ(cass_true, cass_iterator_next(it)); + const CassValue* element = cass_iterator_get_value(it); + cass_int32_t element_value; + EXPECT_EQ(CASS_OK, cass_value_get_int32(element, &element_value)); + EXPECT_EQ(2, element_value); + EXPECT_EQ(cass_true, cass_iterator_next(it)); + EXPECT_EQ(cass_true, cass_value_is_null(element)); + cass_iterator_free(it); +} diff --git 
a/include/cassandra.h b/include/cassandra.h index b2d1770b6..a22da5e3e 100644 --- a/include/cassandra.h +++ b/include/cassandra.h @@ -52,7 +52,7 @@ */ #define CASS_VERSION_MAJOR 2 -#define CASS_VERSION_MINOR 13 +#define CASS_VERSION_MINOR 14 #define CASS_VERSION_PATCH 0 #define CASS_VERSION_SUFFIX "" @@ -700,7 +700,8 @@ typedef enum CassErrorSource_ { XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_NO_PEER_CERT, 3, "No peer certificate") \ XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_INVALID_PEER_CERT, 4, "Invalid peer certificate") \ XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_IDENTITY_MISMATCH, 5, "Certificate does not match host or IP address") \ - XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_PROTOCOL_ERROR, 6, "Protocol error") + XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_PROTOCOL_ERROR, 6, "Protocol error") \ + XX(CASS_ERROR_SOURCE_SSL, CASS_ERROR_SSL_CLOSED, 7, "Connection closed") /* @cond IGNORE */ #define CASS_ERROR_MAP CASS_ERROR_MAPPING /* Deprecated */ @@ -2759,6 +2760,71 @@ cass_cluster_set_host_listener_callback(CassCluster* cluster, CassHostListenerCallback callback, void* data); +/** + * Sets the secure connection bundle path for processing DBaaS credentials. + * + * This will pre-configure a cluster using the credentials format provided by + * the DBaaS cloud provider. + * + * @param[in] cluster + * @param[in] path Absolute path to DBaaS credentials file. + * @return CASS_OK if successful, otherwise error occured. + */ +CASS_EXPORT CassError +cass_cluster_set_cloud_secure_connection_bundle(CassCluster* cluster, + const char* path); + +/** + * Same as cass_cluster_set_cloud_secure_connection_bundle(), but with lengths + * for string parameters. + * + * @see cass_cluster_set_cloud_secure_connection_bundle() + * + * @param[in] cluster + * @param[in] path Absolute path to DBaaS credentials file. + * @param[in] path_length Length of path variable. + * @return CASS_OK if successful, otherwise error occured. + */ +CASS_EXPORT CassError +cass_cluster_set_cloud_secure_connection_bundle_n(CassCluster* cluster, + const char* path, + size_t path_length); + +/** + * Same as cass_cluster_set_cloud_secure_connection_bundle(), but it does not + * initialize the underlying SSL library implementation. The SSL library still + * needs to be initialized, but it's up to the client application to handle + * initialization. This is similar to the function cass_ssl_new_no_lib_init(), + * and its documentation should be used as a reference to properly initialize + * the underlying SSL library. + * + * @see cass_ssl_new_no_lib_init() + * @see cass_cluster_set_cloud_secure_connection_bundle() + * + * @param[in] cluster + * @param[in] path Absolute path to DBaaS credentials file. + * @return CASS_OK if successful, otherwise error occured. + */ +CASS_EXPORT CassError +cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(CassCluster* cluster, + const char* path); + +/** + * Same as cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(), + * but with lengths for string parameters. + * + * @see cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init() + * + * @param[in] cluster + * @param[in] path Absolute path to DBaaS credentials file. + * @param[in] path_length Length of path variable. + * @return CASS_OK if successful, otherwise error occured. 
+ */ +CASS_EXPORT CassError +cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init_n(CassCluster* cluster, + const char* path, + size_t path_length); + /*********************************************************************************** * * Session diff --git a/packaging/debian/rules b/packaging/debian/rules index 62277dfe4..f4bcd60db 100755 --- a/packaging/debian/rules +++ b/packaging/debian/rules @@ -12,7 +12,8 @@ ifneq (,$(filter parallel=%,$(DEB_BUILD_OPTIONS))) endif export SOVER ?= $(shell dpkg-parsechangelog \ - | sed -rne 's/^Version: ([0-9.]+)[-+~].*$$/\1/p') + | sed -rne 's/^Version: ([0-9.]+)([-+~][[:alpha:]][[:alnum:]]*)?([-+~][[:digit:]])?$$/\1\2/p' \ + | sed 's/[+~]/-/') export SONAME=libcassandra.so.$(SOVER) %: diff --git a/src/address.cpp b/src/address.cpp index 5d36a2954..752bd9ae3 100644 --- a/src/address.cpp +++ b/src/address.cpp @@ -21,187 +21,161 @@ #include "row.hpp" #include "value.hpp" -#include -#include - using namespace datastax; using namespace datastax::internal::core; -const Address Address::EMPTY_KEY("0.0.0.0", 0); -const Address Address::DELETED_KEY("0.0.0.0", 1); - -const Address Address::BIND_ANY_IPV4("0.0.0.0", 0); -const Address Address::BIND_ANY_IPV6("::", 0); +const Address Address::EMPTY_KEY(String(), 0); +const Address Address::DELETED_KEY(String(), 1); -Address::Address() { memset(&addr_, 0, sizeof(addr_)); } +namespace { -Address::Address(const String& ip, int port) { - init(); - bool result = from_string(ip, port, this); - UNUSED_(result); - assert(result); +template +inline void hash_combine(std::size_t& seed, const T& v) { + SPARSEHASH_HASH hasher; + seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); } -bool Address::from_string(const String& ip, int port, Address* output) { - char buf[sizeof(struct in6_addr)]; - if (uv_inet_pton(AF_INET, ip.c_str(), &buf) == 0) { - if (output != NULL) { - struct sockaddr_in addr; - uv_ip4_addr(ip.c_str(), port, &addr); - output->init(&addr); - } - return true; - } else if (uv_inet_pton(AF_INET6, ip.c_str(), &buf) == 0) { - if (output != NULL) { - struct sockaddr_in6 addr; - uv_ip6_addr(ip.c_str(), port, &addr); - output->init(&addr); - } - return true; +} // namespace + +Address::Address() + : family_(UNRESOLVED) + , port_(0) {} + +Address::Address(const Address& other, const String& server_name) + : hostname_or_address_(other.hostname_or_address_) + , server_name_(server_name) + , family_(other.family_) + , port_(other.port_) {} + +Address::Address(const String& hostname, int port, const String& server_name) + : server_name_(server_name) + , family_(UNRESOLVED) + , port_(port) { + char addr[16]; + if (uv_inet_pton(AF_INET, hostname.c_str(), addr) == 0) { + hostname_or_address_.assign(addr, addr + 4); + family_ = IPv4; + } else if (uv_inet_pton(AF_INET6, hostname.c_str(), addr) == 0) { + hostname_or_address_.assign(addr, addr + 16); + family_ = IPv6; } else { - return false; + hostname_or_address_ = hostname; } } -bool Address::from_inet(const void* data, size_t size, int port, Address* output) { - - if (size == 4) { - char buf[INET_ADDRSTRLEN]; - if (uv_inet_ntop(AF_INET, data, buf, sizeof(buf)) != 0) { - return false; - } - if (output != NULL) { - struct sockaddr_in addr; - uv_ip4_addr(buf, port, &addr); - output->init(&addr); - } - - return true; - } else if (size == 16) { - char buf[INET6_ADDRSTRLEN]; - if (uv_inet_ntop(AF_INET6, data, buf, sizeof(buf)) != 0) { - return false; - } - if (output != NULL) { - struct sockaddr_in6 addr; - uv_ip6_addr(buf, port, &addr); - output->init(&addr); - 
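// Example (sketch, not part of the patch): minimal use of the
// cass_cluster_set_cloud_secure_connection_bundle() entry points declared in cassandra.h
// above. The bundle path is hypothetical and error handling is reduced to single checks.
#include <cassandra.h>

void connect_with_bundle() {
  CassCluster* cluster = cass_cluster_new();
  CassSession* session = cass_session_new();

  // Contact points, SSL context and port all come from the bundle, so they are not set here.
  if (cass_cluster_set_cloud_secure_connection_bundle(
          cluster, "/path/to/secure-connect-bundle.zip") != CASS_OK) {
    // Bundle could not be loaded (missing file, driver built without zlib, ...).
  }

  CassFuture* connect_future = cass_session_connect(session, cluster);
  if (cass_future_error_code(connect_future) == CASS_OK) {
    // ... run queries ...
  }

  cass_future_free(connect_future);
  cass_session_free(session);
  cass_cluster_free(cluster);
}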
} - - return true; +Address::Address(const uint8_t* address, uint8_t address_length, int port) + : family_(UNRESOLVED) + , port_(port) { + if (address_length == 4) { + hostname_or_address_.assign(reinterpret_cast(address), address_length); + family_ = IPv4; + } else if (address_length == 16) { + hostname_or_address_.assign(reinterpret_cast(address), address_length); + family_ = IPv6; } - return false; } -bool Address::init(const sockaddr* addr) { +Address::Address(const struct sockaddr* addr) + : family_(UNRESOLVED) + , port_(0) { if (addr->sa_family == AF_INET) { - memcpy(addr_in(), addr, sizeof(struct sockaddr_in)); - return true; + const struct sockaddr_in* addr_in = reinterpret_cast(addr); + hostname_or_address_.assign(reinterpret_cast(&addr_in->sin_addr), 4); + port_ = ntohs(addr_in->sin_port); + family_ = IPv4; } else if (addr->sa_family == AF_INET6) { - memcpy(addr_in6(), addr, sizeof(struct sockaddr_in6)); - return true; + const struct sockaddr_in6* addr_in6 = reinterpret_cast(addr); + hostname_or_address_.assign(reinterpret_cast(&addr_in6->sin6_addr), 16); + port_ = ntohs(addr_in6->sin6_port); + family_ = IPv6; } - return false; } -void Address::init(const struct sockaddr_in* addr) { *addr_in() = *addr; } +bool Address::equals(const Address& other, bool with_port) const { + if (family_ != other.family_) return false; + if (with_port && port_ != other.port_) return false; + if (server_name_ != other.server_name_) return false; + if (hostname_or_address_ != other.hostname_or_address_) return false; + return true; +} -void Address::init(const struct sockaddr_in6* addr) { *addr_in6() = *addr; } +bool Address::operator<(const Address& other) const { + if (family_ != other.family_) return family_ < other.family_; + if (port_ != other.port_) return port_ < other.port_; + if (server_name_ != other.server_name_) return server_name_ < other.server_name_; + return hostname_or_address_ < other.hostname_or_address_; +} -int Address::port() const { - if (family() == AF_INET) { - return htons(addr_in()->sin_port); - } else if (family() == AF_INET6) { - return htons(addr_in6()->sin6_port); +String Address::hostname_or_address() const { + if (family_ == IPv4) { + char name[INET_ADDRSTRLEN + 1] = { '\0' }; + uv_inet_ntop(AF_INET, hostname_or_address_.data(), name, INET_ADDRSTRLEN); + return name; + } else if (family_ == IPv6) { + char name[INET6_ADDRSTRLEN + 1] = { '\0' }; + uv_inet_ntop(AF_INET6, hostname_or_address_.data(), name, INET6_ADDRSTRLEN); + return name; + } else { + return hostname_or_address_; } - return -1; } -String Address::to_string(bool with_port) const { - OStringStream ss; - char host[INET6_ADDRSTRLEN + 1] = { '\0' }; - if (family() == AF_INET) { - uv_ip4_name(const_cast(addr_in()), host, INET_ADDRSTRLEN); - ss << host; - if (with_port) ss << ":" << port(); - } else if (family() == AF_INET6) { - uv_ip6_name(const_cast(addr_in6()), host, INET6_ADDRSTRLEN); - if (with_port) ss << "["; - ss << host; - if (with_port) ss << "]:" << port(); - } - return ss.str(); +size_t Address::hash_code() const { + SPARSEHASH_HASH hasher; + size_t code = hasher(family_); + hash_combine(code, port_); + hash_combine(code, server_name_); + hash_combine(code, hostname_or_address_); + return code; } -uint8_t Address::to_inet(uint8_t* data) const { - if (family() == AF_INET) { - memcpy(data, &addr_in()->sin_addr, 4); - return 4; - } else if (family() == AF_INET6) { - memcpy(data, &addr_in6()->sin6_addr, 16); - return 16; +uint8_t Address::to_inet(void* address) const { + if (family_ == IPv4 || family_ == 
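// Example (sketch, not part of the patch): with the rewritten Address above, an IP literal
// is resolved immediately while any other string is kept as an unresolved hostname, which
// is what the is_resolved()/is_valid_and_resolved() accessors report.
Address numeric("127.0.0.1", 9042);
// numeric.family() == Address::IPv4, numeric.is_valid_and_resolved() == true

Address named("node1.example.com", 9042); // hypothetical DNS name
// named.family() == Address::UNRESOLVED, named.is_valid() == true, named.is_resolved() == false;
// it must go through name resolution (or a cluster metadata resolver) before it can be connected.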
IPv6) { + size_t size = hostname_or_address_.size(); + assert((size == 4 || size == 16) && "Invalid size for address"); + hostname_or_address_.copy(reinterpret_cast(address), size); + return static_cast(size); } return 0; } -int Address::compare(const Address& a, bool with_port) const { - if (family() != a.family()) { - return family() < a.family() ? -1 : 1; - } - if (with_port && port() != a.port()) { - return port() < a.port() ? -1 : 1; - } - if (family() == AF_INET) { - if (addr_in()->sin_addr.s_addr != a.addr_in()->sin_addr.s_addr) { - return addr_in()->sin_addr.s_addr < a.addr_in()->sin_addr.s_addr ? -1 : 1; - } - } else if (family() == AF_INET6) { - return memcmp(&(addr_in6()->sin6_addr), &(a.addr_in6()->sin6_addr), - sizeof(addr_in6()->sin6_addr)); +const struct sockaddr* Address::to_sockaddr(SocketStorage* storage) const { + int rc = 0; + if (family_ == IPv4) { + char name[INET_ADDRSTRLEN + 1] = { '\0' }; + rc = uv_inet_ntop(AF_INET, hostname_or_address_.data(), name, INET_ADDRSTRLEN); + if (rc != 0) return NULL; + rc = uv_ip4_addr(name, port_, storage->addr_in()); + } else if (family_ == IPv6) { + char name[INET6_ADDRSTRLEN + 1] = { '\0' }; + rc = uv_inet_ntop(AF_INET6, hostname_or_address_.data(), name, INET6_ADDRSTRLEN); + if (rc != 0) return NULL; + rc = uv_ip6_addr(name, port_, storage->addr_in6()); + } else { + return NULL; } - return 0; + if (rc != 0) return NULL; + return storage->addr(); } -namespace datastax { namespace internal { namespace core { - -bool determine_address_for_peer_host(const Address& connected_address, const Value* peer_value, - const Value* rpc_value, Address* output) { - Address peer_address; - if (!peer_value || - !peer_value->decoder().as_inet(peer_value->size(), connected_address.port(), &peer_address)) { - LOG_WARN("Invalid address format for peer address"); - return false; - } - if (rpc_value && !rpc_value->is_null()) { - if (!rpc_value->decoder().as_inet(rpc_value->size(), connected_address.port(), output)) { - LOG_WARN("Invalid address format for rpc address"); - return false; - } - if (connected_address == *output || connected_address == peer_address) { - LOG_DEBUG("system.peers on %s contains a line with rpc_address for itself. " - "This is not normal, but is a known problem for some versions of DSE. " - "Ignoring this entry.", - connected_address.to_string(false).c_str()); - return false; - } - if (Address::BIND_ANY_IPV4.compare(*output, false) == 0 || - Address::BIND_ANY_IPV6.compare(*output, false) == 0) { - LOG_WARN("Found host with 'bind any' for rpc_address; using listen_address (%s) to contact " - "instead. " - "If this is incorrect you should configure a specific interface for rpc_address on " - "the server.", - peer_address.to_string(false).c_str()); - *output = peer_address; - } +String Address::to_string(bool with_port) const { + OStringStream ss; + if (family_ == IPv6 && with_port) { + ss << "[" << hostname_or_address() << "]"; } else { - LOG_WARN("No rpc_address for host %s in system.peers on %s. 
" - "Ignoring this entry.", - peer_address.to_string(false).c_str(), connected_address.to_string(false).c_str()); - return false; + ss << hostname_or_address(); } - return true; + if (with_port) { + ss << ":" << port_; + } + if (!server_name_.empty()) { + ss << " (" << server_name_ << ")"; + } + return ss.str(); } +namespace datastax { namespace internal { namespace core { + String determine_listen_address(const Address& address, const Row* row) { const Value* v = row->get_by_name("peer"); if (v != NULL) { diff --git a/src/address.hpp b/src/address.hpp index 77aaa6744..969ad03ad 100644 --- a/src/address.hpp +++ b/src/address.hpp @@ -18,130 +18,146 @@ #define DATASTAX_INTERNAL_ADDRESS_HPP #include "allocated.hpp" +#include "callback.hpp" #include "dense_hash_set.hpp" -#include "hash.hpp" #include "string.hpp" #include "vector.hpp" -#include -#include #include namespace datastax { namespace internal { namespace core { class Row; -class Value; class Address : public Allocated { public: static const Address EMPTY_KEY; static const Address DELETED_KEY; - static const Address BIND_ANY_IPV4; - static const Address BIND_ANY_IPV6; + enum Family { UNRESOLVED, IPv4, IPv6 }; - Address(); - Address(const String& ip, int port); // Tests only +#ifdef _WIN32 + struct SocketStorage { + struct sockaddr* addr() { + return reinterpret_cast(&storage); + } + struct sockaddr_in* addr_in() { + return reinterpret_cast(&storage); + } + struct sockaddr_in6* addr_in6() { + return reinterpret_cast(&storage); + } + struct sockaddr_storage storage; + }; +#else + struct SocketStorage { + struct sockaddr* addr() { + return &storage.addr; + } + struct sockaddr_in* addr_in() { + return &storage.addr_in; + } + struct sockaddr_in6* addr_in6() { + return &storage.addr_in6; + } + union { + struct sockaddr addr; + struct sockaddr_in addr_in; + struct sockaddr_in6 addr_in6; + } storage; + }; +#endif - static bool from_string(const String& ip, int port, Address* output = NULL); + Address(); + Address(const Address& other, const String& server_name); + Address(const String& hostname_or_address, int port, const String& server_name = String()); + Address(const uint8_t* address, uint8_t address_length, int port); + Address(const struct sockaddr* addr); - static bool from_inet(const void* data, size_t size, int port, Address* output = NULL); + bool equals(const Address& other, bool with_port = true) const; - bool init(const struct sockaddr* addr); + bool operator==(const Address& other) const { return equals(other); } + bool operator!=(const Address& other) const { return !equals(other); } + bool operator<(const Address& other) const; -#ifdef _WIN32 - const struct sockaddr* addr() const { return reinterpret_cast(&addr_); } - const struct sockaddr_in* addr_in() const { - return reinterpret_cast(&addr_); - } - const struct sockaddr_in6* addr_in6() const { - return reinterpret_cast(&addr_); - } -#else - const struct sockaddr* addr() const { return &addr_; } - const struct sockaddr_in* addr_in() const { return &addr_in_; } - const struct sockaddr_in6* addr_in6() const { return &addr_in6_; } -#endif +public: + String hostname_or_address() const; + const String& server_name() const { return server_name_; } + Family family() const { return family_; } + int port() const { return port_; } - bool is_valid() const { return family() == AF_INET || family() == AF_INET6; } - int family() const { return addr()->sa_family; } - int port() const; + bool is_valid() const { return !hostname_or_address_.empty(); } + bool is_resolved() const { return family_ 
== IPv4 || family_ == IPv6; } + bool is_valid_and_resolved() const { return is_valid() && is_resolved(); } +public: + size_t hash_code() const; + uint8_t to_inet(void* address) const; + const struct sockaddr* to_sockaddr(SocketStorage* storage) const; String to_string(bool with_port = false) const; - uint8_t to_inet(uint8_t* data) const; - - int compare(const Address& a, bool with_port = true) const; private: - void init() { addr()->sa_family = AF_UNSPEC; } - void init(const struct sockaddr_in* addr); - void init(const struct sockaddr_in6* addr); + String hostname_or_address_; + String server_name_; + Family family_; + int port_; +}; -#ifdef _WIN32 - struct sockaddr* addr() { - return reinterpret_cast(&addr_); - } - struct sockaddr_in* addr_in() { - return reinterpret_cast(&addr_); - } - struct sockaddr_in6* addr_in6() { - return reinterpret_cast(&addr_); - } +String determine_listen_address(const Address& address, const Row* row); - struct sockaddr_storage addr_; -#else - struct sockaddr* addr() { - return &addr_; - } - struct sockaddr_in* addr_in() { - return &addr_in_; - } - struct sockaddr_in6* addr_in6() { - return &addr_in6_; - } +}}} // namespace datastax::internal::core - union { - struct sockaddr addr_; - struct sockaddr_in addr_in_; - struct sockaddr_in6 addr_in6_; - }; +namespace std { + +#if defined(HASH_IN_TR1) && !defined(_WIN32) +namespace tr1 { #endif + +template <> +struct hash { + size_t operator()(const datastax::internal::core::Address& address) const { + return address.hash_code(); + } }; -struct AddressHash { - std::size_t operator()(const Address& a) const { - if (a.family() == AF_INET) { - return hash::fnv1a(reinterpret_cast(a.addr()), sizeof(struct sockaddr_in)); - } else if (a.family() == AF_INET6) { - return hash::fnv1a(reinterpret_cast(a.addr()), sizeof(struct sockaddr_in6)); - } - return 0; +template <> +struct hash { + size_t operator()(datastax::internal::core::Address::Family family) const { + return hasher(static_cast(family)); } + SPARSEHASH_HASH hasher; }; -typedef Vector
AddressVec; -class AddressSet : public DenseHashSet<Address, AddressHash> { +#if defined(HASH_IN_TR1) && !defined(_WIN32) +} // namespace tr1 +#endif + +} // namespace std + +namespace datastax { namespace internal { namespace core { + +class AddressSet : public DenseHashSet<Address>
{ public: AddressSet() { set_empty_key(Address::EMPTY_KEY); set_deleted_key(Address::DELETED_KEY); } }; +typedef Vector<Address>
AddressVec; -inline bool operator<(const Address& a, const Address& b) { return a.compare(b) < 0; } - -inline bool operator==(const Address& a, const Address& b) { return a.compare(b) == 0; } +}}} // namespace datastax::internal::core -inline bool operator!=(const Address& a, const Address& b) { return a.compare(b) != 0; } +namespace std { -inline std::ostream& operator<<(std::ostream& os, const Address& addr) { - return os << addr.to_string(); +inline std::ostream& operator<<(std::ostream& os, const datastax::internal::core::Address& a) { + return os << a.to_string(); } -inline std::ostream& operator<<(std::ostream& os, const AddressVec& v) { +inline std::ostream& operator<<(std::ostream& os, const datastax::internal::core::AddressVec& v) { os << "["; bool first = true; - for (AddressVec::const_iterator it = v.begin(), end = v.end(); it != end; ++it) { + for (datastax::internal::core::AddressVec::const_iterator it = v.begin(), end = v.end(); + it != end; ++it) { if (!first) os << ", "; first = false; os << *it; @@ -150,11 +166,6 @@ inline std::ostream& operator<<(std::ostream& os, const AddressVec& v) { return os; } -bool determine_address_for_peer_host(const Address& connected_address, const Value* peer_value, - const Value* rpc_value, Address* output); - -String determine_listen_address(const Address& address, const Row* row); - -}}} // namespace datastax::internal::core +} // namespace std #endif diff --git a/src/address_factory.cpp b/src/address_factory.cpp new file mode 100644 index 000000000..9ec274097 --- /dev/null +++ b/src/address_factory.cpp @@ -0,0 +1,80 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "address_factory.hpp" + +#include "row.hpp" + +using namespace datastax::internal::core; + +bool DefaultAddressFactory::create(const Row* peers_row, const Host::Ptr& connected_host, + Address* output) { + Address connected_address = connected_host->address(); + const Value* peer_value = peers_row->get_by_name("peer"); + const Value* rpc_value = peers_row->get_by_name("rpc_address"); + + Address peer_address; + if (!peer_value || + !peer_value->decoder().as_inet(peer_value->size(), connected_address.port(), &peer_address)) { + LOG_WARN("Invalid address format for peer address"); + return false; + } + if (rpc_value && !rpc_value->is_null()) { + if (!rpc_value->decoder().as_inet(rpc_value->size(), connected_address.port(), output)) { + LOG_WARN("Invalid address format for rpc address"); + return false; + } + if (connected_address == *output || connected_address == peer_address) { + LOG_DEBUG("system.peers on %s contains a line with rpc_address for itself. " + "This is not normal, but is a known problem for some versions of DSE. " + "Ignoring this entry.", + connected_address.to_string(false).c_str()); + return false; + } + if (Address("0.0.0.0", 0).equals(*output, false) || Address("::", 0).equals(*output, false)) { + LOG_WARN("Found host with 'bind any' for rpc_address; using listen_address (%s) to contact " + "instead. 
If this is incorrect you should configure a specific interface for " + "rpc_address on the server.", + peer_address.to_string(false).c_str()); + *output = peer_address; + } + } else { + LOG_WARN("No rpc_address for host %s in system.peers on %s. Ignoring this entry.", + peer_address.to_string(false).c_str(), connected_address.to_string(false).c_str()); + return false; + } + return true; +} + +bool SniAddressFactory::create(const Row* peers_row, const Host::Ptr& connected_host, + Address* output) { + CassUuid host_id; + if (!peers_row->get_uuid_by_name("host_id", &host_id)) { + // Attempt to get an peer address for the error log. + Address peer_address; + const Value* peer_value = peers_row->get_by_name("peer"); + if (!peer_value || !peer_value->decoder().as_inet( + peer_value->size(), connected_host->address().port(), &peer_address)) { + LOG_WARN("Invalid address format for peer address"); + } + LOG_ERROR("Invalid `host_id` for host. %s will be ignored.", + peer_address.is_valid() ? peer_address.to_string().c_str() : ""); + return false; + } + *output = Address(connected_host->address().hostname_or_address(), + connected_host->address().port(), to_string(host_id)); + return true; +} diff --git a/src/address_factory.hpp b/src/address_factory.hpp new file mode 100644 index 000000000..1310d5d14 --- /dev/null +++ b/src/address_factory.hpp @@ -0,0 +1,63 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef DATASTAX_INTERNAL_ADDRESS_FACTORY_HPP +#define DATASTAX_INTERNAL_ADDRESS_FACTORY_HPP + +#include "config.hpp" +#include "host.hpp" +#include "ref_counted.hpp" + +namespace datastax { namespace internal { namespace core { + +class Row; + +/** + * An interface for constructing `Address` from `system.local`/`system.peers` row data. + */ +class AddressFactory : public RefCounted { +public: + typedef SharedRefPtr Ptr; + virtual ~AddressFactory() {} + virtual bool create(const Row* peers_row, const Host::Ptr& connected_host, Address* output) = 0; +}; + +/** + * An address factory that creates `Address` using the `rpc_address` column. + */ +class DefaultAddressFactory : public AddressFactory { + virtual bool create(const Row* peers_row, const Host::Ptr& connected_host, Address* output); +}; + +/** + * An address factory that creates `Address` using the connected host's address and the `host_id` + * (for the SNI servername) column. 
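// Example (sketch, not part of the patch): how these factories are meant to be driven while
// processing a system.peers row. `config`, `peers_row` and `connected_host` are assumed to
// come from the control connection; the concrete factory is picked by the
// create_address_factory_from_config() helper defined just below.
AddressFactory::Ptr factory(create_address_factory_from_config(config));
Address address;
if (factory->create(peers_row, connected_host, &address)) {
  // DefaultAddressFactory yields the peer's rpc_address; SniAddressFactory yields the
  // connected proxy endpoint with the peer's host_id as the SNI server name.
}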
+ */ +class SniAddressFactory : public AddressFactory { + virtual bool create(const Row* peers_row, const Host::Ptr& connected_host, Address* output); +}; + +inline AddressFactory* create_address_factory_from_config(const Config& config) { + if (config.cloud_secure_connection_config().is_loaded()) { + return new SniAddressFactory(); + } else { + return new DefaultAddressFactory(); + } +} + +}}} // namespace datastax::internal::core + +#endif diff --git a/src/allocated.hpp b/src/allocated.hpp index 073dcd76e..51bc8fd09 100644 --- a/src/allocated.hpp +++ b/src/allocated.hpp @@ -30,6 +30,8 @@ class Allocated { void* operator new(size_t, void* p) { return p; } void* operator new[](size_t, void* p) { return p; } + void operator delete(void* ptr, void* p) {} + void operator delete[](void* ptr, void* p) {} }; template diff --git a/src/auth.cpp b/src/auth.cpp index 6d352b7cf..227e02b39 100644 --- a/src/auth.cpp +++ b/src/auth.cpp @@ -24,6 +24,12 @@ using namespace datastax; using namespace datastax::internal::core; +using namespace datastax::internal::enterprise; + +#define DSE_AUTHENTICATOR "com.datastax.bdp.cassandra.auth.DseAuthenticator" + +#define DSE_PLAINTEXT_AUTH_MECHANISM "PLAIN" +#define DSE_PLAINTEXT_AUTH_SERVER_INITIAL_CHALLENGE "PLAIN-START" extern "C" { @@ -95,6 +101,37 @@ bool PlainTextAuthenticator::success(const String& token) { return true; } +bool DsePlainTextAuthenticator::initial_response(String* response) { + if (class_name_ == DSE_AUTHENTICATOR) { + response->assign(DSE_PLAINTEXT_AUTH_MECHANISM); + return true; + } else { + return evaluate_challenge(DSE_PLAINTEXT_AUTH_SERVER_INITIAL_CHALLENGE, response); + } +} + +bool DsePlainTextAuthenticator::evaluate_challenge(const String& token, String* response) { + if (token != DSE_PLAINTEXT_AUTH_SERVER_INITIAL_CHALLENGE) { + LOG_ERROR("Invalid start token for DSE plaintext authenticator during challenge: '%s'", + token.c_str()); + return false; + } + + // Credentials are of the form "\0\0" + response->append(authorization_id_); + response->push_back('\0'); + response->append(username_); + response->push_back('\0'); + response->append(password_); + + return true; +} + +bool DsePlainTextAuthenticator::success(const String& token) { + // no-op + return true; +} + ExternalAuthenticator::ExternalAuthenticator(const Address& address, const String& hostname, const String& class_name, const CassAuthenticatorCallbacks* callbacks, diff --git a/src/auth.hpp b/src/auth.hpp index 3f6f4a4d0..0469a7498 100644 --- a/src/auth.hpp +++ b/src/auth.hpp @@ -166,6 +166,52 @@ class PlainTextAuthProvider : public AuthProvider { }}} // namespace datastax::internal::core +namespace datastax { namespace internal { namespace enterprise { + +class DsePlainTextAuthenticator : public core::Authenticator { +public: + DsePlainTextAuthenticator(const String& class_name, const String& username, + const String& password, const String& authorization_id) + : class_name_(class_name) + , username_(username) + , password_(password) + , authorization_id_(authorization_id) {} + + virtual bool initial_response(String* response); + virtual bool evaluate_challenge(const String& token, String* response); + virtual bool success(const String& token); + +private: + String class_name_; + String username_; + String password_; + String authorization_id_; +}; + +class DsePlainTextAuthProvider : public core::AuthProvider { +public: + DsePlainTextAuthProvider(const String& username, const String& password, + const String& authorization_id) + : AuthProvider("DsePlainTextAuthProvider") + , 
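// Example (sketch, not part of the patch): the PLAIN token assembled by
// DsePlainTextAuthenticator::evaluate_challenge() above is authorization id, username and
// password separated by NUL bytes. With a hypothetical "cassandra"/"cassandra" login and an
// empty authorization id:
String authorization_id; // empty: act as the authenticated user
String response;
response.append(authorization_id);
response.push_back('\0');
response.append("cassandra"); // hypothetical username
response.push_back('\0');
response.append("cassandra"); // hypothetical password
// response now holds "\0cassandra\0cassandra", matching the append sequence above.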
username_(username) + , password_(password) + , authorization_id_(authorization_id) {} + + virtual core::Authenticator::Ptr new_authenticator(const core::Address& address, + const String& hostname, + const String& class_name) const { + return core::Authenticator::Ptr( + new DsePlainTextAuthenticator(class_name, username_, password_, authorization_id_)); + } + +private: + String username_; + String password_; + String authorization_id_; +}; + +}}} // namespace datastax::internal::enterprise + EXTERNAL_TYPE(datastax::internal::core::ExternalAuthenticator, CassAuthenticator) #endif diff --git a/src/batch_request.cpp b/src/batch_request.cpp index ce21cd157..37a94010e 100644 --- a/src/batch_request.cpp +++ b/src/batch_request.cpp @@ -130,7 +130,7 @@ int BatchRequest::encode(ProtocolVersion version, RequestCallback* callback, Buffer buf(buf_size); size_t pos = buf.encode_byte(0, type_); - buf.encode_uint16(pos, statements().size()); + buf.encode_uint16(pos, static_cast(statements().size())); bufs->push_back(buf); length += buf_size; @@ -183,7 +183,7 @@ int BatchRequest::encode(ProtocolVersion version, RequestCallback* callback, if (version >= CASS_PROTOCOL_VERSION_V5) { pos = buf.encode_int32(pos, flags); } else { - pos = buf.encode_byte(pos, flags); + pos = buf.encode_byte(pos, static_cast(flags)); } if (callback->serial_consistency() != 0) { @@ -195,7 +195,7 @@ int BatchRequest::encode(ProtocolVersion version, RequestCallback* callback, } if (version.supports_set_keyspace() && !keyspace().empty()) { - pos = buf.encode_string(pos, keyspace().data(), keyspace().size()); + pos = buf.encode_string(pos, keyspace().data(), static_cast(keyspace().size())); } bufs->push_back(buf); diff --git a/src/batch_request.hpp b/src/batch_request.hpp index efeff0f4c..fc77953e7 100644 --- a/src/batch_request.hpp +++ b/src/batch_request.hpp @@ -33,11 +33,12 @@ class ExecuteRequest; class BatchRequest : public RoutableRequest { public: + typedef SharedRefPtr Ptr; typedef Vector StatementVec; - BatchRequest(uint8_t type_) + BatchRequest(uint8_t type) : RoutableRequest(CQL_OPCODE_BATCH) - , type_(type_) {} + , type_(type) {} uint8_t type() const { return type_; } diff --git a/src/blacklist_policy.cpp b/src/blacklist_policy.cpp index c2e0d5aa0..61d367a71 100644 --- a/src/blacklist_policy.cpp +++ b/src/blacklist_policy.cpp @@ -19,7 +19,7 @@ using namespace datastax::internal::core; bool BlacklistPolicy::is_valid_host(const Host::Ptr& host) const { - const String& host_address = host->address().to_string(false); + const String& host_address = host->address().hostname_or_address(); for (ContactPointList::const_iterator it = hosts_.begin(), end = hosts_.end(); it != end; ++it) { if (host_address.compare(*it) == 0) { return false; diff --git a/src/callback.hpp b/src/callback.hpp index dc8dc3d56..1e59b11f4 100644 --- a/src/callback.hpp +++ b/src/callback.hpp @@ -65,7 +65,7 @@ class Callback { return *this; } - operator bool() const { return invoker_; } + operator bool() const { return invoker_ != NULL; } R operator()(const Arg& arg) const { return invoker_->invoke(arg); } diff --git a/src/cloud_secure_connection_config.cpp b/src/cloud_secure_connection_config.cpp new file mode 100644 index 000000000..a633e87a1 --- /dev/null +++ b/src/cloud_secure_connection_config.cpp @@ -0,0 +1,318 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "cloud_secure_connection_config.hpp" + +#include "auth.hpp" +#include "cluster.hpp" +#include "cluster_metadata_resolver.hpp" +#include "config.hpp" +#include "http_client.hpp" +#include "json.hpp" +#include "logger.hpp" +#include "ssl.hpp" +#include "utils.hpp" + +using namespace datastax; +using namespace datastax::internal; +using namespace datastax::internal::core; + +#define CLOUD_ERROR "Unable to load cloud secure connection configuration: " +#define METADATA_SERVER_ERROR "Unable to configure driver from metadata server: " + +// Pinned to v1 because that's what the driver currently handles. +#define METADATA_SERVER_PATH "/metadata?version=1" + +#define METADATA_SERVER_PORT 30443 +#define RESPONSE_BODY_TRUNCATE_LENGTH 1024 + +#ifdef HAVE_ZLIB +#include "unzip.h" + +#define CONFIGURATION_FILE "config.json" +#define CERTIFICATE_AUTHORITY_FILE "ca.crt" +#define CERTIFICATE_FILE "cert" +#define KEY_FILE "key" + +class UnzipFile { +public: + UnzipFile() + : file(NULL) {} + + ~UnzipFile() { unzClose(file); } + + bool open(const String& filename) { return (file = unzOpen(filename.c_str())) != NULL; } + + bool read_contents(const String& filename, String* contents) { + int rc = unzLocateFile(file, filename.c_str(), 0); + if (rc != UNZ_OK) { + return false; + } + + rc = unzOpenCurrentFile(file); + if (rc != UNZ_OK) { + return false; + } + + unz_file_info file_info; + rc = unzGetCurrentFileInfo(file, &file_info, 0, 0, 0, 0, 0, 0); + if (rc != UNZ_OK) { + unzCloseCurrentFile(file); + return false; + } + + contents->resize(file_info.uncompressed_size, 0); + unzReadCurrentFile(file, &(*contents)[0], contents->size()); + unzCloseCurrentFile(file); + + return true; + } + +private: + unzFile file; +}; +#endif + +namespace { + +class CloudClusterMetadataResolver : public ClusterMetadataResolver { +public: + CloudClusterMetadataResolver(const String& host, int port, const SocketSettings& settings, + uint64_t request_timeout_ms) + : client_(new HttpClient(Address(host, port), METADATA_SERVER_PATH, + bind_callback(&CloudClusterMetadataResolver::on_response, this))) { + client_->with_settings(settings)->with_request_timeout_ms(request_timeout_ms); + } + +private: + virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) { + inc_ref(); + client_->request(loop); + } + + virtual void internal_cancel() { client_->cancel(); } + +private: + void on_response(HttpClient* http_client) { + if (http_client->is_ok()) { + if (http_client->content_type().find("json") != std::string::npos) { + parse_metadata(http_client->response_body()); + } else { + LOG_ERROR(METADATA_SERVER_ERROR "Invalid response content type: '%s'", + http_client->content_type().c_str()); + } + } else if (!http_client->is_canceled()) { + if (http_client->is_error_status_code()) { + String error_message = + http_client->response_body().substr(0, RESPONSE_BODY_TRUNCATE_LENGTH); + if (http_client->content_type().find("json") != std::string::npos) { + json::Document document; + document.Parse(http_client->response_body().c_str()); + if (document.IsObject() && document.HasMember("message") && + 
document["message"].IsString()) { + error_message = document["message"].GetString(); + } + } + LOG_ERROR(METADATA_SERVER_ERROR "Returned error response code %u: '%s'", + http_client->status_code(), error_message.c_str()); + } else { + LOG_ERROR(METADATA_SERVER_ERROR "%s", http_client->error_message().c_str()); + } + } + + callback_(this); + dec_ref(); + } + + void parse_metadata(const String& response_body) { + json::Document document; + document.Parse(response_body.c_str()); + + if (!document.IsObject()) { + LOG_ERROR(METADATA_SERVER_ERROR "Metadata JSON is invalid"); + return; + } + + if (!document.HasMember("contact_info") || !document["contact_info"].IsObject()) { + LOG_ERROR(METADATA_SERVER_ERROR "Contact information is not available"); + return; + } + + const json::Value& contact_info = document["contact_info"]; + + if (!contact_info.HasMember("local_dc") || !contact_info["local_dc"].IsString()) { + LOG_ERROR(METADATA_SERVER_ERROR "Local DC is not available"); + return; + } + + local_dc_ = contact_info["local_dc"].GetString(); + + if (!contact_info.HasMember("sni_proxy_address") || + !contact_info["sni_proxy_address"].IsString()) { + LOG_ERROR(METADATA_SERVER_ERROR "SNI proxy address is not available"); + return; + } + + int sni_port = METADATA_SERVER_PORT; + Vector tokens; + explode(contact_info["sni_proxy_address"].GetString(), tokens, ':'); + String sni_address = tokens[0]; + if (tokens.size() == 2) { + IStringStream ss(tokens[1]); + if ((ss >> sni_port).fail()) { + LOG_WARN(METADATA_SERVER_ERROR "Invalid port, default %d will be used", + METADATA_SERVER_PORT); + } + } + + if (!contact_info.HasMember("contact_points") || !contact_info["contact_points"].IsArray()) { + LOG_ERROR(METADATA_SERVER_ERROR "Contact points are not available"); + return; + } + + const json::Value& contact_points = contact_info["contact_points"]; + for (rapidjson::SizeType i = 0; i < contact_points.Size(); ++i) { + if (contact_points[i].IsString()) { + String host_id = contact_points[i].GetString(); + resolved_contact_points_.push_back(Address(sni_address, sni_port, host_id)); + } + } + } + +private: + HttpClient::Ptr client_; +}; + +class CloudClusterMetadataResolverFactory : public ClusterMetadataResolverFactory { +public: + CloudClusterMetadataResolverFactory(const String& host, int port) + : host_(host) + , port_(port) {} + + virtual ClusterMetadataResolver::Ptr new_instance(const ClusterSettings& settings) const { + return ClusterMetadataResolver::Ptr(new CloudClusterMetadataResolver( + host_, port_, settings.control_connection_settings.connection_settings.socket_settings, + settings.control_connection_settings.connection_settings.connect_timeout_ms)); + } + + virtual const char* name() const { return "Cloud"; } + +private: + String host_; + int port_; +}; + +} // namespace + +CloudSecureConnectionConfig::CloudSecureConnectionConfig() + : is_loaded_(false) + , port_(0) {} + +bool CloudSecureConnectionConfig::load(const String& filename, Config* config /* = NULL */) { +#ifndef HAVE_ZLIB + LOG_ERROR(CLOUD_ERROR "Driver was not built with zlib support"); + return false; +#else + UnzipFile zip_file; + if (!zip_file.open(filename.c_str())) { + LOG_ERROR(CLOUD_ERROR "Unable to open zip file %s; file does not exist or is invalid", + filename.c_str()); + return false; + } + + String contents; + if (!zip_file.read_contents(CONFIGURATION_FILE, &contents)) { + LOG_ERROR(CLOUD_ERROR "Missing configuration file %s", CONFIGURATION_FILE); + return false; + } + + json::MemoryStream memory_stream(contents.c_str(), 
contents.size()); + json::AutoUTFMemoryInputStream auto_utf_stream(memory_stream); + json::Document document; + document.ParseStream(auto_utf_stream); + if (!document.IsObject()) { + LOG_ERROR(CLOUD_ERROR "Invalid configuration"); + return false; + } + + if (document.HasMember("username") && document["username"].IsString()) { + username_ = document["username"].GetString(); + } + if (document.HasMember("password") && document["password"].IsString()) { + password_ = document["password"].GetString(); + } + + if (config && (!username_.empty() || !password_.empty())) { + config->set_auth_provider( + AuthProvider::Ptr(new enterprise::DsePlainTextAuthProvider(username_, password_, ""))); + } + + if (!document.HasMember("host") || !document["host"].IsString()) { + LOG_ERROR(CLOUD_ERROR "Missing host"); + return false; + } + if (!document.HasMember("port") || !document["port"].IsInt()) { + LOG_ERROR(CLOUD_ERROR "Missing port"); + return false; + } + host_ = document["host"].GetString(); + port_ = document["port"].GetInt(); + + if (!zip_file.read_contents(CERTIFICATE_AUTHORITY_FILE, &ca_cert_)) { + LOG_ERROR(CLOUD_ERROR "Missing certificate authority file %s", CERTIFICATE_AUTHORITY_FILE); + return false; + } + + if (!zip_file.read_contents(CERTIFICATE_FILE, &cert_)) { + LOG_ERROR(CLOUD_ERROR "Missing certificate file %s", CERTIFICATE_FILE); + return false; + } + + if (!zip_file.read_contents(KEY_FILE, &key_)) { + LOG_ERROR(CLOUD_ERROR "Missing key file %s", KEY_FILE); + return false; + } + + if (config) { + SslContext::Ptr ssl_context(SslContextFactory::create()); + + ssl_context->set_verify_flags(CASS_SSL_VERIFY_PEER_CERT | CASS_SSL_VERIFY_PEER_IDENTITY_DNS); + + if (ssl_context->add_trusted_cert(ca_cert_.c_str(), ca_cert_.length()) != CASS_OK) { + LOG_ERROR(CLOUD_ERROR "Invalid CA certificate %s", CERTIFICATE_AUTHORITY_FILE); + return false; + } + + if (ssl_context->set_cert(cert_.c_str(), cert_.length()) != CASS_OK) { + LOG_ERROR(CLOUD_ERROR "Invalid client certificate %s", CERTIFICATE_FILE); + return false; + } + + if (ssl_context->set_private_key(key_.c_str(), key_.length(), NULL, 0) != CASS_OK) { + LOG_ERROR(CLOUD_ERROR "Invalid client private key %s", KEY_FILE); + return false; + } + + config->set_ssl_context(ssl_context); + + config->set_cluster_metadata_resolver_factory( + ClusterMetadataResolverFactory::Ptr(new CloudClusterMetadataResolverFactory(host_, port_))); + } + + is_loaded_ = true; + return true; +#endif +} diff --git a/src/cloud_secure_connection_config.hpp b/src/cloud_secure_connection_config.hpp new file mode 100644 index 000000000..8bda008d1 --- /dev/null +++ b/src/cloud_secure_connection_config.hpp @@ -0,0 +1,55 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
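// Example (sketch, not part of the patch): the secure connect bundle layout that
// CloudSecureConnectionConfig::load() above expects; file names match the CONFIGURATION_FILE
// and CERTIFICATE_*/KEY_FILE constants, and the JSON values are illustrative only.
//
//   secure-connect-bundle.zip
//     config.json  e.g. {"username": "user", "password": "pass",
//                        "host": "account.db.example.com", "port": 30443}
//     ca.crt       PEM certificate authority used to verify the SNI proxy
//     cert         PEM client certificate
//     key          PEM client private key
//
// username/password are optional; when present they configure a DsePlainTextAuthProvider.
// host/port point at the metadata service queried by CloudClusterMetadataResolver.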
+*/ + +#ifndef DATASTAX_INTERNAL_CLOUD_SECURE_CONNECTION_CONFIG_HPP +#define DATASTAX_INTERNAL_CLOUD_SECURE_CONNECTION_CONFIG_HPP + +#include "string.hpp" + +namespace datastax { namespace internal { namespace core { + +class Config; + +class CloudSecureConnectionConfig { +public: + CloudSecureConnectionConfig(); + + bool load(const String& filename, Config* config = NULL); + bool is_loaded() const { return is_loaded_; } + + const String& username() const { return username_; } + const String& password() const { return password_; } + const String& host() const { return host_; } + int port() const { return port_; } + + const String& ca_cert() const { return ca_cert_; } + const String& cert() const { return cert_; } + const String& key() const { return key_; } + +private: + bool is_loaded_; + String username_; + String password_; + String host_; + int port_; + String ca_cert_; + String cert_; + String key_; +}; + +}}} // namespace datastax::internal::core + +#endif diff --git a/src/cluster.cpp b/src/cluster.cpp index b6c49e544..f03eaabd0 100644 --- a/src/cluster.cpp +++ b/src/cluster.cpp @@ -201,7 +201,8 @@ ClusterSettings::ClusterSettings() , reconnection_policy(new ExponentialReconnectionPolicy()) , prepare_on_up_or_add_host(CASS_DEFAULT_PREPARE_ON_UP_OR_ADD_HOST) , max_prepares_per_flush(CASS_DEFAULT_MAX_PREPARES_PER_FLUSH) - , disable_events_on_startup(false) { + , disable_events_on_startup(false) + , cluster_metadata_resolver_factory(new DefaultClusterMetadataResolverFactory()) { load_balancing_policies.push_back(load_balancing_policy); } @@ -213,14 +214,15 @@ ClusterSettings::ClusterSettings(const Config& config) , reconnection_policy(config.reconnection_policy()) , prepare_on_up_or_add_host(config.prepare_on_up_or_add_host()) , max_prepares_per_flush(CASS_DEFAULT_MAX_PREPARES_PER_FLUSH) - , disable_events_on_startup(false) {} + , disable_events_on_startup(false) + , cluster_metadata_resolver_factory(config.cluster_metadata_resolver_factory()) {} Cluster::Cluster(const ControlConnection::Ptr& connection, ClusterListener* listener, EventLoop* event_loop, const Host::Ptr& connected_host, const HostMap& hosts, const ControlConnectionSchema& schema, const LoadBalancingPolicy::Ptr& load_balancing_policy, - const LoadBalancingPolicy::Vec& load_balancing_policies, - const ClusterSettings& settings) + const LoadBalancingPolicy::Vec& load_balancing_policies, const String& local_dc, + const StringMultimap& supported_options, const ClusterSettings& settings) : connection_(connection) , listener_(listener ? 
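// Example (sketch, not part of the patch): one plausible way the class above is driven from
// Config::set_cloud_secure_connection_bundle(); the Config-side wiring is not shown in this
// part of the diff, so treat this as an assumption.
CloudSecureConnectionConfig cloud_config;
if (cloud_config.load("/path/to/secure-connect-bundle.zip", &config)) {
  // On success `config` now carries the SSL context, the DSE plaintext auth provider and the
  // cloud cluster metadata resolver factory, and cloud_config.is_loaded() returns true.
}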
listener : &nop_cluster_listener__) , event_loop_(event_loop) @@ -230,6 +232,8 @@ Cluster::Cluster(const ControlConnection::Ptr& connection, ClusterListener* list , is_closing_(false) , connected_host_(connected_host) , hosts_(hosts) + , local_dc_(local_dc) + , supported_options_(supported_options) , is_recording_events_(settings.disable_events_on_startup) { inc_ref(); connection_->set_listener(this); @@ -357,7 +361,7 @@ void Cluster::update_schema(const ControlConnectionSchema& schema) { void Cluster::update_token_map(const HostMap& hosts, const String& partitioner, const ControlConnectionSchema& schema) { - if (settings_.control_connection_settings.token_aware_routing && schema.keyspaces) { + if (settings_.control_connection_settings.use_token_aware_routing && schema.keyspaces) { // Create a new token map and populate it token_map_ = TokenMap::from_partitioner(partitioner); if (!token_map_) { diff --git a/src/cluster.hpp b/src/cluster.hpp index 5fab3f645..4822164d6 100644 --- a/src/cluster.hpp +++ b/src/cluster.hpp @@ -213,6 +213,13 @@ struct ClusterSettings { * started by calling `Cluster::start_events()`. */ bool disable_events_on_startup; + + /** + * A factory for creating cluster metadata resolvers. A cluster metadata resolver is used to + * determine contact points and retrieve other metadata required to connect the + * cluster. + */ + ClusterMetadataResolverFactory::Ptr cluster_metadata_resolver_factory; }; /** @@ -241,6 +248,9 @@ class Cluster * @param load_balancing_policy The default load balancing policy to use for * determining the next control connection host. * @param load_balancing_policies + * @param local_dc The local datacenter determined by the metadata service for initializing the + * load balancing policies. + * @param supported_options Supported options discovered during control connection. * @param settings The control connection settings to use for reconnecting the * control connection. 
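// Example (sketch, not part of the patch): the cluster_metadata_resolver_factory member
// documented above is how the cloud code path swaps contact-point resolution strategies
// without changing ClusterConnector. A sketch, assuming `config` already holds a loaded
// bundle:
ClusterSettings settings(config);
// With a bundle this is the CloudClusterMetadataResolverFactory; otherwise the default
// factory (not shown here) resolves the configured contact points directly.
ClusterMetadataResolver::Ptr resolver =
    settings.cluster_metadata_resolver_factory->new_instance(settings);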
*/ @@ -248,7 +258,8 @@ class Cluster EventLoop* event_loop, const Host::Ptr& connected_host, const HostMap& hosts, const ControlConnectionSchema& schema, const LoadBalancingPolicy::Ptr& load_balancing_policy, - const LoadBalancingPolicy::Vec& load_balancing_policies, const ClusterSettings& settings); + const LoadBalancingPolicy::Vec& load_balancing_policies, const String& local_dc, + const StringMultimap& supported_options, const ClusterSettings& settings); /** * Set the listener that will handle events for the cluster @@ -341,7 +352,9 @@ class Cluster ProtocolVersion protocol_version() const { return connection_->protocol_version(); } const Host::Ptr& connected_host() const { return connected_host_; } const TokenMap::Ptr& token_map() const { return token_map_; } + const String& local_dc() const { return local_dc_; } const VersionNumber& dse_server_version() const { return connection_->dse_server_version(); } + const StringMultimap& supported_options() const { return supported_options_; } private: friend class ClusterRunClose; @@ -426,6 +439,8 @@ class Cluster Metadata metadata_; PreparedMetadata prepared_metadata_; TokenMap::Ptr token_map_; + String local_dc_; + StringMultimap supported_options_; Timer timer_; bool is_recording_events_; ClusterEvent::Vec recorded_events_; diff --git a/src/cluster_config.cpp b/src/cluster_config.cpp index 9e1dba162..fda24f978 100644 --- a/src/cluster_config.cpp +++ b/src/cluster_config.cpp @@ -17,6 +17,7 @@ #include "cluster_config.hpp" using namespace datastax; +using namespace datastax::internal; using namespace datastax::internal::core; extern "C" { @@ -26,13 +27,21 @@ CassCluster* cass_cluster_new() { return CassCluster::to(new ClusterConfig()); } CassError cass_cluster_set_port(CassCluster* cluster, int port) { if (port <= 0) { return CASS_ERROR_LIB_BAD_PARAMS; + } else if (cluster->config().cloud_secure_connection_config().is_loaded()) { + LOG_ERROR("Port cannot be overridden with cloud secure connection bundle"); + return CASS_ERROR_LIB_BAD_PARAMS; } + cluster->config().set_port(port); return CASS_OK; } void cass_cluster_set_ssl(CassCluster* cluster, CassSsl* ssl) { - cluster->config().set_ssl_context(ssl->from()); + if (cluster->config().cloud_secure_connection_config().is_loaded()) { + LOG_ERROR("SSL context cannot be overridden with cloud secure connection bundle"); + } else { + cluster->config().set_ssl_context(ssl->from()); + } } CassError cass_cluster_set_protocol_version(CassCluster* cluster, int protocol_version) { @@ -100,10 +109,20 @@ CassError cass_cluster_set_contact_points(CassCluster* cluster, const char* cont CassError cass_cluster_set_contact_points_n(CassCluster* cluster, const char* contact_points, size_t contact_points_length) { + if (cluster->config().cloud_secure_connection_config().is_loaded()) { + LOG_ERROR("Contact points cannot be overridden with cloud secure connection bundle"); + return CASS_ERROR_LIB_BAD_PARAMS; + } + if (contact_points_length == 0) { cluster->config().contact_points().clear(); } else { - explode(String(contact_points, contact_points_length), cluster->config().contact_points()); + Vector exploded; + explode(String(contact_points, contact_points_length), exploded); + for (Vector::const_iterator it = exploded.begin(), end = exploded.end(); it != end; + ++it) { + cluster->config().contact_points().push_back(Address(*it, -1)); + } } return CASS_OK; } @@ -393,7 +412,7 @@ CassError cass_cluster_set_use_hostname_resolution(CassCluster* cluster, cass_bo CassError 
cass_cluster_set_use_randomized_contact_points(CassCluster* cluster, cass_bool_t enabled) { - cluster->config().set_use_randomized_contact_points(enabled); + cluster->config().set_use_randomized_contact_points(enabled == cass_true); return CASS_OK; } @@ -448,12 +467,15 @@ CassError cass_cluster_set_local_address(CassCluster* cluster, const char* name) CassError cass_cluster_set_local_address_n(CassCluster* cluster, const char* name, size_t name_length) { - Address address; // default to AF_UNSPEC - if (name_length == 0 || name == NULL || - Address::from_string(String(name, name_length), 0, &address)) { - cluster->config().set_local_address(address); + if (name_length == 0 || name == NULL) { + cluster->config().set_local_address(Address()); } else { - return CASS_ERROR_LIB_HOST_RESOLUTION; + Address address(String(name, name_length), 0); + if (address.is_valid_and_resolved()) { + cluster->config().set_local_address(address); + } else { + return CASS_ERROR_LIB_HOST_RESOLUTION; + } } return CASS_OK; } @@ -470,6 +492,53 @@ CassError cass_cluster_set_host_listener_callback(CassCluster* cluster, return CASS_OK; } +CassError cass_cluster_set_cloud_secure_connection_bundle(CassCluster* cluster, const char* path) { + return cass_cluster_set_cloud_secure_connection_bundle_n(cluster, path, SAFE_STRLEN(path)); +} + +CassError cass_cluster_set_cloud_secure_connection_bundle_n(CassCluster* cluster, const char* path, + size_t path_length) { + if (cluster->config().contact_points().empty() && !cluster->config().ssl_context()) { + SslContextFactory::init_once(); + } + return cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init_n(cluster, path, + path_length); +} + +CassError cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init(CassCluster* cluster, + const char* path) { + return cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init_n(cluster, path, + SAFE_STRLEN(path)); +} + +CassError cass_cluster_set_cloud_secure_connection_bundle_no_ssl_lib_init_n(CassCluster* cluster, + const char* path, + size_t path_length) { + const AddressVec& contact_points = cluster->config().contact_points(); + const SslContext::Ptr& ssl_context = cluster->config().ssl_context(); + if (!contact_points.empty() || ssl_context) { + String message; + if (!cluster->config().contact_points().empty()) { + message.append("Contact points"); + } + if (cluster->config().ssl_context()) { + if (!message.empty()) { + message.append(" and "); + } + message.append("SSL context"); + } + message.append(" must not be specified with cloud secure connection bundle"); + LOG_ERROR("%s", message.c_str()); + + return CASS_ERROR_LIB_BAD_PARAMS; + } + + if (!cluster->config().set_cloud_secure_connection_bundle(String(path, path_length))) { + return CASS_ERROR_LIB_BAD_PARAMS; + } + return CASS_OK; +} + void cass_cluster_free(CassCluster* cluster) { delete cluster->from(); } } // extern "C" diff --git a/src/cluster_connector.cpp b/src/cluster_connector.cpp index f13cb4a80..2c194bd21 100644 --- a/src/cluster_connector.cpp +++ b/src/cluster_connector.cpp @@ -56,7 +56,7 @@ class RunCancelCluster : public Task { }}} // namespace datastax::internal::core -ClusterConnector::ClusterConnector(const ContactPointList& contact_points, +ClusterConnector::ClusterConnector(const AddressVec& contact_points, ProtocolVersion protocol_version, const Callback& callback) : remaining_connector_count_(0) , contact_points_(contact_points) @@ -104,32 +104,14 @@ Cluster::Ptr ClusterConnector::release_cluster() { void 
ClusterConnector::internal_resolve_and_connect() { inc_ref(); - if (random_) { + if (random_ && !contact_points_.empty()) { random_shuffle(contact_points_.begin(), contact_points_.end(), random_); } - for (ContactPointList::const_iterator it = contact_points_.begin(), end = contact_points_.end(); - it != end; ++it) { - const String& contact_point = *it; - Address address; - // Attempt to parse the contact point string. If it's an IP address - // then immediately add it to our resolved contact points, otherwise - // attempt to resolve the string as a hostname. - if (Address::from_string(contact_point, settings_.port, &address)) { - contact_points_resolved_.push_back(address); - } else { - if (!resolver_) { - resolver_.reset(new MultiResolver(bind_callback(&ClusterConnector::on_resolve, this))); - } - resolver_->resolve(event_loop_->loop(), contact_point, settings_.port, - settings_.control_connection_settings.connection_settings.socket_settings - .resolve_timeout_ms); - } - } + resolver_ = settings_.cluster_metadata_resolver_factory->new_instance(settings_); - if (!resolver_) { - internal_connect_all(); - } + resolver_->resolve(event_loop_->loop(), contact_points_, + bind_callback(&ClusterConnector::on_resolve, this)); } void ClusterConnector::internal_connect(const Address& address, ProtocolVersion version) { @@ -142,21 +124,6 @@ void ClusterConnector::internal_connect(const Address& address, ProtocolVersion ->connect(event_loop_->loop()); } -void ClusterConnector::internal_connect_all() { - if (contact_points_resolved_.empty()) { - error_code_ = CLUSTER_ERROR_NO_HOSTS_AVAILABLE; - error_message_ = "Unable to connect to any contact points"; - finish(); - return; - } - remaining_connector_count_ = contact_points_resolved_.size(); - for (AddressVec::const_iterator it = contact_points_resolved_.begin(), - end = contact_points_resolved_.end(); - it != end; ++it) { - internal_connect(*it, protocol_version_); - } -} - void ClusterConnector::internal_cancel() { error_code_ = CLUSTER_CANCELED; if (resolver_) resolver_->cancel(); @@ -194,37 +161,28 @@ void ClusterConnector::on_error(ClusterConnector::ClusterError code, const Strin maybe_finish(); } -void ClusterConnector::on_resolve(MultiResolver* resolver) { +void ClusterConnector::on_resolve(ClusterMetadataResolver* resolver) { if (is_canceled()) { finish(); return; } - const Resolver::Vec& resolvers = resolver->resolvers(); - for (Resolver::Vec::const_iterator it = resolvers.begin(), end = resolvers.end(); it != end; - ++it) { - const Resolver::Ptr resolver(*it); - if (resolver->is_success()) { - const AddressVec& addresses = resolver->addresses(); - if (!addresses.empty()) { - for (AddressVec::const_iterator it = addresses.begin(), end = addresses.end(); it != end; - ++it) { - contact_points_resolved_.push_back(*it); - } - } else { - LOG_ERROR("No addresses resolved for %s:%d\n", resolver->hostname().c_str(), - resolver->port()); - } - } else if (resolver->is_timed_out()) { - LOG_ERROR("Timed out attempting to resolve address for %s:%d\n", resolver->hostname().c_str(), - resolver->port()); - } else if (!resolver->is_canceled()) { - LOG_ERROR("Unable to resolve address for %s:%d\n", resolver->hostname().c_str(), - resolver->port()); - } + const AddressVec& resolved_contact_points(resolver->resolved_contact_points()); + + if (resolved_contact_points.empty()) { + error_code_ = CLUSTER_ERROR_NO_HOSTS_AVAILABLE; + error_message_ = "Unable to connect to any contact points"; + finish(); + return; } - internal_connect_all(); + local_dc_ = 
resolver->local_dc(); + remaining_connector_count_ = resolved_contact_points.size(); + for (AddressVec::const_iterator it = resolved_contact_points.begin(), + end = resolved_contact_points.end(); + it != end; ++it) { + internal_connect(*it, protocol_version_); + } } void ClusterConnector::on_connect(ControlConnector* connector) { @@ -272,7 +230,7 @@ void ClusterConnector::on_connect(ControlConnector* connector) { for (LoadBalancingPolicy::Vec::const_iterator it = policies.begin(), end = policies.end(); it != end; ++it) { LoadBalancingPolicy::Ptr policy(*it); - policy->init(connected_host, hosts, random_); + policy->init(connected_host, hosts, random_, local_dc_); policy->register_handles(event_loop_->loop()); } @@ -299,7 +257,7 @@ void ClusterConnector::on_connect(ControlConnector* connector) { cluster_.reset(new Cluster(connector->release_connection(), listener_, event_loop_, connected_host, hosts, connector->schema(), default_policy, policies, - settings_)); + local_dc_, connector->supported_options(), settings_)); // Clear any connection errors and set the final negotiated protocol version. error_code_ = CLUSTER_OK; diff --git a/src/cluster_connector.hpp b/src/cluster_connector.hpp index 70d943ad4..e960fa058 100644 --- a/src/cluster_connector.hpp +++ b/src/cluster_connector.hpp @@ -19,6 +19,7 @@ #include "callback.hpp" #include "cluster.hpp" +#include "cluster_metadata_resolver.hpp" #include "resolver.hpp" namespace datastax { namespace internal { @@ -60,7 +61,7 @@ class ClusterConnector : public RefCounted { * @param callback A callback that is called when a connection to a contact * point is established, if an error occurred, or all contact points failed. */ - ClusterConnector(const ContactPointList& contact_points, ProtocolVersion protocol_version, + ClusterConnector(const AddressVec& contact_points, ProtocolVersion protocol_version, const Callback& callback); /** @@ -138,18 +139,17 @@ class ClusterConnector : public RefCounted { private: void internal_resolve_and_connect(); void internal_connect(const Address& address, ProtocolVersion version); - void internal_connect_all(); void internal_cancel(); void finish(); void maybe_finish(); void on_error(ClusterError code, const String& message); - void on_resolve(MultiResolver* resolver); + void on_resolve(ClusterMetadataResolver* resolver); void on_connect(ControlConnector* connector); private: - class ConnectorMap : public DenseHashMap { + class ConnectorMap : public DenseHashMap { public: ConnectorMap() { set_empty_key(Address::EMPTY_KEY); @@ -159,16 +159,16 @@ class ClusterConnector : public RefCounted { private: Cluster::Ptr cluster_; - MultiResolver::Ptr resolver_; + ClusterMetadataResolver::Ptr resolver_; ConnectorMap connectors_; size_t remaining_connector_count_; - ContactPointList contact_points_; - AddressVec contact_points_resolved_; + AddressVec contact_points_; ProtocolVersion protocol_version_; ClusterListener* listener_; EventLoop* event_loop_; Random* random_; Metrics* metrics_; + String local_dc_; ClusterSettings settings_; Callback callback_; diff --git a/src/cluster_metadata_resolver.cpp b/src/cluster_metadata_resolver.cpp new file mode 100644 index 000000000..78ef0c70d --- /dev/null +++ b/src/cluster_metadata_resolver.cpp @@ -0,0 +1,106 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+#include "cluster_metadata_resolver.hpp"
+
+#include "cluster.hpp"
+#include "logger.hpp"
+
+using namespace datastax::internal::core;
+
+namespace {
+
+class DefaultClusterMetadataResolver : public ClusterMetadataResolver {
+public:
+  DefaultClusterMetadataResolver(uint64_t resolve_timeout_ms, int port)
+      : resolve_timeout_ms_(resolve_timeout_ms)
+      , port_(port) {}
+
+private:
+  virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) {
+    inc_ref();
+
+    for (AddressVec::const_iterator it = contact_points.begin(), end = contact_points.end();
+         it != end; ++it) {
+      // If the port is not set then use the default port value.
+      int port = it->port() <= 0 ? port_ : it->port();
+
+      if (it->is_resolved()) {
+        resolved_contact_points_.push_back(Address(it->hostname_or_address(), port));
+      } else {
+        if (!resolver_) {
+          resolver_.reset(
+              new MultiResolver(bind_callback(&DefaultClusterMetadataResolver::on_resolve, this)));
+        }
+        resolver_->resolve(loop, it->hostname_or_address(), port, resolve_timeout_ms_);
+      }
+    }
+
+    if (!resolver_) {
+      callback_(this);
+      dec_ref();
+      return;
+    }
+  }
+
+  virtual void internal_cancel() {
+    if (resolver_) resolver_->cancel();
+  }
+
+private:
+  void on_resolve(MultiResolver* resolver) {
+    const Resolver::Vec& resolvers = resolver->resolvers();
+    for (Resolver::Vec::const_iterator it = resolvers.begin(), end = resolvers.end(); it != end;
+         ++it) {
+      const Resolver::Ptr resolver(*it);
+      if (resolver->is_success()) {
+        const AddressVec& addresses = resolver->addresses();
+        if (!addresses.empty()) {
+          for (AddressVec::const_iterator it = addresses.begin(), end = addresses.end(); it != end;
+               ++it) {
+            resolved_contact_points_.push_back(*it);
+          }
+        } else {
+          LOG_ERROR("No addresses resolved for %s:%d\n", resolver->hostname().c_str(),
+                    resolver->port());
+        }
+      } else if (resolver->is_timed_out()) {
+        LOG_ERROR("Timed out attempting to resolve address for %s:%d\n",
+                  resolver->hostname().c_str(), resolver->port());
+      } else if (!resolver->is_canceled()) {
+        LOG_ERROR("Unable to resolve address for %s:%d\n", resolver->hostname().c_str(),
+                  resolver->port());
+      }
+    }
+
+    callback_(this);
+    dec_ref();
+  }
+
+private:
+  MultiResolver::Ptr resolver_;
+  const uint64_t resolve_timeout_ms_;
+  const int port_;
+};
+
+} // namespace
+
+ClusterMetadataResolver::Ptr
+DefaultClusterMetadataResolverFactory::new_instance(const ClusterSettings& settings) const {
+  return ClusterMetadataResolver::Ptr(new DefaultClusterMetadataResolver(
+      settings.control_connection_settings.connection_settings.socket_settings.resolve_timeout_ms,
+      settings.port));
+}
diff --git a/src/cluster_metadata_resolver.hpp b/src/cluster_metadata_resolver.hpp
new file mode 100644
index 000000000..90e91acbd
--- /dev/null
+++ b/src/cluster_metadata_resolver.hpp
@@ -0,0 +1,90 @@
+/*
+  Copyright (c) DataStax, Inc.
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+#ifndef DATASTAX_INTERNAL_CLUSTER_METADATA_RESOLVER_HPP
+#define DATASTAX_INTERNAL_CLUSTER_METADATA_RESOLVER_HPP
+
+#include "address.hpp"
+#include "allocated.hpp"
+#include "callback.hpp"
+#include "ref_counted.hpp"
+#include "resolver.hpp"
+
+#include <uv.h>
+
+namespace datastax { namespace internal { namespace core {
+
+struct ClusterSettings;
+
+/**
+ * An abstract class for resolving contact points and other cluster metadata.
+ */
+class ClusterMetadataResolver : public RefCounted<ClusterMetadataResolver> {
+public:
+  typedef SharedRefPtr<ClusterMetadataResolver> Ptr;
+  typedef internal::Callback<void, ClusterMetadataResolver*> Callback;
+
+  virtual ~ClusterMetadataResolver() {}
+
+  void resolve(uv_loop_t* loop, const AddressVec& contact_points, const Callback& callback) {
+    callback_ = callback;
+    internal_resolve(loop, contact_points);
+  }
+
+  virtual void cancel() { internal_cancel(); }
+
+  const AddressVec& resolved_contact_points() const { return resolved_contact_points_; }
+  const String& local_dc() const { return local_dc_; }
+
+protected:
+  virtual void internal_resolve(uv_loop_t* loop, const AddressVec& contact_points) = 0;
+
+  virtual void internal_cancel() = 0;
+
+protected:
+  AddressVec resolved_contact_points_;
+  String local_dc_;
+  Callback callback_;
+};
+
+/**
+ * An interface for constructing instances of `ClusterMetadataResolver`s. The factory's instance
+ * creation method is passed the cluster settings object to allow cluster metadata resolvers to
+ * configure themselves with appropriate settings.
+ */
+class ClusterMetadataResolverFactory : public RefCounted<ClusterMetadataResolverFactory> {
+public:
+  typedef SharedRefPtr<ClusterMetadataResolverFactory> Ptr;
+
+  virtual ~ClusterMetadataResolverFactory() {}
+  virtual ClusterMetadataResolver::Ptr new_instance(const ClusterSettings& settings) const = 0;
+  virtual const char* name() const = 0;
+};
+
+/**
+ * This factory creates cluster metadata resolvers that determine contact points using DNS.
+ * Domain names with multiple A-records are expanded into multiple contact points and addresses
+ * that are already resolved are passed through.
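+ * Contact points without an explicit port are resolved using the cluster's configured port, and
+ * contact points that fail to resolve (or time out) are logged and skipped.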
+ */ +class DefaultClusterMetadataResolverFactory : public ClusterMetadataResolverFactory { +public: + virtual ClusterMetadataResolver::Ptr new_instance(const ClusterSettings& settings) const; + virtual const char* name() const { return "Default"; } +}; + +}}} // namespace datastax::internal::core + +#endif diff --git a/src/config.cpp b/src/config.cpp index 86924ee23..5eb38635d 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -21,10 +21,6 @@ using namespace datastax::internal::core; void Config::init_profiles() { // Initialize the profile settings (if needed) for (ExecutionProfile::Map::iterator it = profiles_.begin(); it != profiles_.end(); ++it) { - if (it->second.consistency() == CASS_CONSISTENCY_UNKNOWN) { - it->second.set_consistency(default_profile_.consistency()); - } - if (it->second.serial_consistency() == CASS_CONSISTENCY_UNKNOWN) { it->second.set_serial_consistency(default_profile_.serial_consistency()); } diff --git a/src/config.hpp b/src/config.hpp index 5265f05ee..709313391 100644 --- a/src/config.hpp +++ b/src/config.hpp @@ -19,6 +19,8 @@ #include "auth.hpp" #include "cassandra.h" +#include "cloud_secure_connection_config.hpp" +#include "cluster_metadata_resolver.hpp" #include "constants.hpp" #include "execution_profile.hpp" #include "protocol.hpp" @@ -71,11 +73,11 @@ class Config { , no_compact_(CASS_DEFAULT_NO_COMPACT) , is_client_id_set_(false) , host_listener_(new DefaultHostListener()) - , monitor_reporting_interval_secs_(CASS_DEFAULT_CLIENT_MONITOR_EVENTS_INTERVAL_SECS) { + , monitor_reporting_interval_secs_(CASS_DEFAULT_CLIENT_MONITOR_EVENTS_INTERVAL_SECS) + , cluster_metadata_resolver_factory_(new DefaultClusterMetadataResolverFactory()) { profiles_.set_empty_key(String()); // Assign the defaults to the cluster profile - default_profile_.set_consistency(CASS_DEFAULT_CONSISTENCY); default_profile_.set_serial_consistency(CASS_DEFAULT_SERIAL_CONSISTENCY); default_profile_.set_request_timeout(CASS_DEFAULT_REQUEST_TIMEOUT_MS); default_profile_.set_load_balancing_policy(new DCAwarePolicy()); @@ -164,9 +166,9 @@ class Config { void set_resolve_timeout(unsigned timeout_ms) { resolve_timeout_ms_ = timeout_ms; } - const ContactPointList& contact_points() const { return contact_points_; } + const AddressVec& contact_points() const { return contact_points_; } - ContactPointList& contact_points() { return contact_points_; } + AddressVec& contact_points() { return contact_points_; } int port() const { return port_; } @@ -229,6 +231,7 @@ class Config { const SslContext::Ptr& ssl_context() const { return ssl_context_; } void set_ssl_context(SslContext* ssl_context) { ssl_context_.reset(ssl_context); } + void set_ssl_context(const SslContext::Ptr& ssl_context) { ssl_context_ = ssl_context; } bool token_aware_routing() const { return default_profile().token_aware_routing(); } @@ -369,6 +372,33 @@ class Config { monitor_reporting_interval_secs_ = interval_secs; }; + const CloudSecureConnectionConfig& cloud_secure_connection_config() const { + return cloud_secure_connection_config_; + } + bool set_cloud_secure_connection_bundle(const String& path) { + return cloud_secure_connection_config_.load(path, this); + } + + const ClusterMetadataResolverFactory::Ptr& cluster_metadata_resolver_factory() const { + return cluster_metadata_resolver_factory_; + } + + void set_cluster_metadata_resolver_factory(const ClusterMetadataResolverFactory::Ptr& factory) { + cluster_metadata_resolver_factory_ = factory; + } + + void set_default_consistency(CassConsistency consistency) { + if 
(default_profile_.consistency() == CASS_CONSISTENCY_UNKNOWN) { + default_profile_.set_consistency(consistency); + } + + for (ExecutionProfile::Map::iterator it = profiles_.begin(); it != profiles_.end(); ++it) { + if (it->second.consistency() == CASS_CONSISTENCY_UNKNOWN) { + it->second.set_consistency(consistency); + } + } + } + private: void init_profiles(); @@ -376,7 +406,7 @@ class Config { int port_; ProtocolVersion protocol_version_; bool use_beta_protocol_version_; - ContactPointList contact_points_; + AddressVec contact_points_; unsigned thread_count_io_; unsigned queue_size_io_; unsigned core_connections_per_host_; @@ -416,6 +446,8 @@ class Config { CassUuid client_id_; DefaultHostListener::Ptr host_listener_; unsigned monitor_reporting_interval_secs_; + CloudSecureConnectionConfig cloud_secure_connection_config_; + ClusterMetadataResolverFactory::Ptr cluster_metadata_resolver_factory_; }; }}} // namespace datastax::internal::core diff --git a/src/connection.cpp b/src/connection.cpp index c26d9dec2..a49cd02af 100644 --- a/src/connection.cpp +++ b/src/connection.cpp @@ -47,19 +47,18 @@ HeartbeatCallback::HeartbeatCallback(Connection* connection) , connection_(connection) {} void HeartbeatCallback::on_internal_set(ResponseMessage* response) { - LOG_TRACE("Heartbeat completed on host %s", connection_->socket_->address_string().c_str()); + LOG_TRACE("Heartbeat completed on host %s", connection_->host_->address_string().c_str()); connection_->heartbeat_outstanding_ = false; } void HeartbeatCallback::on_internal_error(CassError code, const String& message) { LOG_WARN("An error occurred on host %s during a heartbeat request: %s", - connection_->socket_->address_string().c_str(), message.c_str()); + connection_->host_->address_string().c_str(), message.c_str()); connection_->heartbeat_outstanding_ = false; } void HeartbeatCallback::on_internal_timeout() { - LOG_WARN("Heartbeat request timed out on host %s", - connection_->socket_->address_string().c_str()); + LOG_WARN("Heartbeat request timed out on host %s", connection_->host_->address_string().c_str()); connection_->heartbeat_outstanding_ = false; } @@ -116,8 +115,6 @@ Connection::Connection(const Socket::Ptr& socket, const Host::Ptr& host, , heartbeat_interval_secs_(heartbeat_interval_secs) , heartbeat_outstanding_(false) { inc_ref(); // For the event loop - - assert(host_->address() == socket_->address() && "Host doesn't match socket address"); host_->increment_connection_count(); } @@ -133,34 +130,12 @@ int32_t Connection::write(const RequestCallback::Ptr& callback) { int32_t request_size = socket_->write(callback.get()); - if (request_size < 0) { + if (request_size <= 0) { stream_manager_.release(stream); - - switch (request_size) { - case SocketRequest::SOCKET_REQUEST_ERROR_CLOSED: - callback->on_error(CASS_ERROR_LIB_WRITE_ERROR, "Unable to write to close socket"); - break; - - case SocketRequest::SOCKET_REQUEST_ERROR_NO_HANDLER: - callback->on_error(CASS_ERROR_LIB_WRITE_ERROR, - "Socket is not properly configured with a handler"); - break; - - case Request::REQUEST_ERROR_BATCH_WITH_NAMED_VALUES: - case Request::REQUEST_ERROR_PARAMETER_UNSET: - // Already handled with a specific error. 
- break; - - case Request::REQUEST_ERROR_UNSUPPORTED_PROTOCOL: - callback->on_error(CASS_ERROR_LIB_MESSAGE_ENCODE, - "Operation unsupported by this protocol version"); - break; - - default: - callback->on_error(CASS_ERROR_LIB_WRITE_ERROR, "Unspecified write error occurred"); - break; + if (request_size == 0) { + callback->on_error(CASS_ERROR_LIB_MESSAGE_ENCODE, "The encoded request had no data to write"); + return Request::REQUEST_ERROR_NO_DATA_WRITTEN; } - return request_size; } @@ -169,7 +144,7 @@ int32_t Connection::write(const RequestCallback::Ptr& callback) { LOG_TRACE("Sending message type %s with stream %d on host %s", opcode_to_string(callback->request()->opcode()).c_str(), stream, - socket_->address_string().c_str()); + host_->address_string().c_str()); callback->set_state(RequestCallback::REQUEST_STATE_WRITING); @@ -274,7 +249,7 @@ void Connection::on_read(const char* buf, size_t size) { LOG_TRACE("Consumed message type %s with stream %d, input %u, remaining %u on host %s", opcode_to_string(response->opcode()).c_str(), static_cast(response->stream()), static_cast(size), static_cast(remaining), - socket_->address_string().c_str()); + host_->address_string().c_str()); if (response->stream() < 0) { if (response->opcode() == CQL_OPCODE_EVENT) { @@ -343,7 +318,8 @@ void Connection::restart_heartbeat_timer() { void Connection::on_heartbeat(Timer* timer) { if (!heartbeat_outstanding_ && !socket_->is_closing()) { - if (!write_and_flush(RequestCallback::Ptr(new HeartbeatCallback(this)))) { + RequestCallback::Ptr callback(new HeartbeatCallback(this)); + if (write_and_flush(callback) < 0) { // Recycling only this connection with a timeout error. This is unlikely and // it means the connection ran out of stream IDs as a result of requests // that never returned and as a result timed out. diff --git a/src/connection.hpp b/src/connection.hpp index 84ecc33c7..d8878f027 100644 --- a/src/connection.hpp +++ b/src/connection.hpp @@ -202,8 +202,9 @@ class Connection : public RefCounted { void start_heartbeats(); public: - const Address& address() const { return socket_->address(); } - const String& address_string() const { return socket_->address_string(); } + const Address& address() const { return host_->address(); } + const String& address_string() const { return host_->address_string(); } + const Address& resolved_address() const { return socket_->address(); } const Host::Ptr& host() const { return host_; } ProtocolVersion protocol_version() const { return protocol_version_; } const String& keyspace() { return keyspace_; } diff --git a/src/connection_pool.cpp b/src/connection_pool.cpp index a6908cbdf..d22d73c11 100644 --- a/src/connection_pool.cpp +++ b/src/connection_pool.cpp @@ -27,6 +27,13 @@ using namespace datastax; using namespace datastax::internal::core; static inline bool least_busy_comp(const PooledConnection::Ptr& a, const PooledConnection::Ptr& b) { + // Don't consider closed connections to be the least busy. + if (a->is_closing()) { // "a" is closed so it can't be the least busy. + return false; + } else if (b->is_closing()) { // "a" is not close, but "b" is closed so "a" is less busy. + return true; + } + // Both "a" and "b" are not closed so compare their inflight request counts. 
return a->inflight_request_count() < b->inflight_request_count(); } @@ -89,10 +96,12 @@ ConnectionPool::ConnectionPool(const Connection::Vec& connections, ConnectionPoo } PooledConnection::Ptr ConnectionPool::find_least_busy() const { - if (connections_.empty()) { + PooledConnection::Vec::const_iterator it = + std::min_element(connections_.begin(), connections_.end(), least_busy_comp); + if (it == connections_.end() || (*it)->is_closing()) { return PooledConnection::Ptr(); } - return *std::min_element(connections_.begin(), connections_.end(), least_busy_comp); + return *it; } bool ConnectionPool::has_connections() const { return !connections_.empty(); } diff --git a/src/connection_pool.hpp b/src/connection_pool.hpp index 116b22e50..cd6a5e89c 100644 --- a/src/connection_pool.hpp +++ b/src/connection_pool.hpp @@ -104,7 +104,7 @@ class ConnectionPool : public RefCounted { typedef SharedRefPtr Ptr; typedef DenseHashMap ReconnectionSchedules; - class Map : public DenseHashMap { + class Map : public DenseHashMap { public: Map() { set_empty_key(Address::EMPTY_KEY); @@ -131,7 +131,7 @@ class ConnectionPool : public RefCounted { /** * Find the least busy connection for the pool. The least busy connection has - * the lowest number of outstanding requests. + * the lowest number of outstanding requests and is not closed. * * @return The least busy connection or null if no connection is available. */ diff --git a/src/connector.cpp b/src/connector.cpp index c58959a47..76cf411b5 100644 --- a/src/connector.cpp +++ b/src/connector.cpp @@ -67,11 +67,9 @@ StartupCallback::StartupCallback(Connector* connector, const Request::ConstPtr& void StartupCallback::on_internal_set(ResponseMessage* response) { switch (response->opcode()) { -#ifdef CASS_USE_OPTIONS case CQL_OPCODE_SUPPORTED: connector_->on_supported(response); break; -#endif case CQL_OPCODE_ERROR: { ErrorResponse* error = static_cast(response->response_body().get()); @@ -280,26 +278,19 @@ void Connector::on_ready_or_register_for_events() { } } -// TODO: We don't currently do anything with the options returned from the -// SUPPORTED response. In the not too distant future we will be using these -// options for at least compression, but likely other things too. 
-#ifdef CASS_USE_OPTIONS -void ConnectionConnector::on_supported(ResponseMessage* response) { +void Connector::on_supported(ResponseMessage* response) { SupportedResponse* supported = static_cast(response->response_body().get()); - - // TODO: Do something with the supported info - (void)supported; + supported_options_ = supported->supported_options(); connection_->write_and_flush(RequestCallback::Ptr(new StartupCallback( this, Request::ConstPtr(new StartupRequest(settings_.application_name, settings_.application_version, settings_.client_id, settings_.no_compact))))); } -#endif void Connector::on_authenticate(const String& class_name) { Authenticator::Ptr auth(settings_.auth_provider->new_authenticator( - socket_connector_->address(), socket_connector_->hostname(), class_name)); + host_->address(), socket_connector_->hostname(), class_name)); if (!auth) { on_error(CONNECTION_ERROR_AUTH, "Authentication required but no auth provider set"); } else { @@ -355,15 +346,9 @@ void Connector::on_connect(SocketConnector* socket_connector) { socket->set_handler(new ConnectionHandler(connection_.get())); } -#ifdef CASS_USE_OPTIONS connection_->write_and_flush( RequestCallback::Ptr(new StartupCallback(this, Request::ConstPtr(new OptionsRequest())))); -#else - connection_->write_and_flush(RequestCallback::Ptr(new StartupCallback( - this, Request::ConstPtr(new StartupRequest(settings_.application_name, - settings_.application_version, - settings_.client_id, settings_.no_compact))))); -#endif + } else if (socket_connector->is_canceled() || is_timeout_error()) { finish(); } else if (socket_connector->error_code() == SocketConnector::SOCKET_ERROR_CONNECT) { diff --git a/src/connector.hpp b/src/connector.hpp index 28cbf30a8..7e22e72c6 100644 --- a/src/connector.hpp +++ b/src/connector.hpp @@ -164,7 +164,7 @@ class Connector public: uv_loop_t* loop() { return loop_; } - const Address& address() const { return socket_connector_->address(); } + const Address& address() const { return host_->address(); } const ProtocolVersion protocol_version() const { return protocol_version_; } bool is_ok() const { return error_code_ == CONNECTION_OK; } @@ -181,6 +181,8 @@ class Connector return is_auth_error() || is_ssl_error() || is_invalid_protocol() || is_keyspace_error(); } + const StringMultimap& supported_options() const { return supported_options_; } + ConnectionError error_code() { return error_code_; } const String& error_message() { return error_message_; } @@ -217,6 +219,8 @@ class Connector ConnectionError error_code_; String error_message_; + StringMultimap supported_options_; + ProtocolVersion protocol_version_; String keyspace_; int event_types_; diff --git a/src/constants.hpp b/src/constants.hpp index 84293368a..d85a48cb0 100644 --- a/src/constants.hpp +++ b/src/constants.hpp @@ -130,7 +130,8 @@ #define CASS_DEFAULT_TCP_KEEPALIVE_ENABLED true #define CASS_DEFAULT_TCP_NO_DELAY_ENABLED true #define CASS_DEFAULT_THREAD_COUNT_IO 1 -#define CASS_DEFAULT_TOKEN_AWARE_ROUTING true +#define CASS_DEFAULT_USE_TOKEN_AWARE_ROUTING true +#define CASS_DEFAULT_USE_SNI_ROUTING false #define CASS_DEFAULT_USE_BETA_PROTOCOL_VERSION false #define CASS_DEFAULT_USE_RANDOMIZED_CONTACT_POINTS true #define CASS_DEFAULT_USE_SCHEMA true @@ -155,6 +156,12 @@ #define CASS_LOWEST_SUPPORTED_PROTOCOL_VERSION 3 #define CASS_NEWEST_BETA_PROTOCOL_VERSION 5 +// DBaaS product type identification +#define CASS_DBAAS_PRODUCT_TYPE "DATASTAX_APOLLO" + +// DBaaS defaults +#define CASS_DEFAULT_DBAAS_CONSISTENCY CASS_CONSISTENCY_LOCAL_QUORUM + #define 
CASS_MAX_STREAMS 32768 // Protocol v3+: 2 ^ (16 - 1) (2 bytes) #endif diff --git a/src/control_connection.cpp b/src/control_connection.cpp index c2f1c46e5..2129f9f7e 100644 --- a/src/control_connection.cpp +++ b/src/control_connection.cpp @@ -304,14 +304,34 @@ class NopControlConnectionListener : public ControlConnectionListener { static NopControlConnectionListener nop_listener__; +ControlConnectionSettings::ControlConnectionSettings() + : use_schema(CASS_DEFAULT_USE_SCHEMA) + , use_token_aware_routing(CASS_DEFAULT_USE_TOKEN_AWARE_ROUTING) + , address_factory(new DefaultAddressFactory()) {} + +ControlConnectionSettings::ControlConnectionSettings(const Config& config) + : connection_settings(config) + , use_schema(config.use_schema()) + , use_token_aware_routing(config.token_aware_routing()) + , address_factory(create_address_factory_from_config(config)) {} + +ControlConnector::ControlConnector(const Host::Ptr& host, ProtocolVersion protocol_version, + const Callback& callback) + : connector_( + new Connector(host, protocol_version, bind_callback(&ControlConnector::on_connect, this))) + , callback_(callback) + , error_code_(CONTROL_CONNECTION_OK) + , listener_(NULL) + , metrics_(NULL) {} + ControlConnection::ControlConnection(const Connection::Ptr& connection, - ControlConnectionListener* listener, bool use_schema, - bool token_aware_routing, const VersionNumber& server_version, + ControlConnectionListener* listener, + const ControlConnectionSettings& settings, + const VersionNumber& server_version, const VersionNumber& dse_server_version, ListenAddressMap listen_addresses) : connection_(connection) - , use_schema_(use_schema) - , token_aware_routing_(token_aware_routing) + , settings_(settings) , server_version_(server_version) , dse_server_version_(dse_server_version) , listen_addresses_(listen_addresses) @@ -337,7 +357,7 @@ void ControlConnection::set_listener(ControlConnectionListener* listener) { } void ControlConnection::refresh_node(RefreshNodeType type, const Address& address) { - bool is_connected_host = (address == this->address()); + bool is_connected_host = connection_->host()->rpc_address().equals(address, false); String query; bool is_all_peers = false; @@ -358,8 +378,8 @@ void ControlConnection::refresh_node(RefreshNodeType type, const Address& addres LOG_DEBUG("Refresh node: %s", query.c_str()); - if (write_and_flush(RequestCallback::Ptr( - new RefreshNodeCallback(address, type, is_all_peers, query, this))) < 0) { + RequestCallback::Ptr callback(new RefreshNodeCallback(address, type, is_all_peers, query, this)); + if (write_and_flush(callback) < 0) { LOG_ERROR("No more stream available while attempting to refresh node info"); defunct(); } @@ -371,57 +391,47 @@ void ControlConnection::on_refresh_node(ControlRequestCallback* callback) { } void ControlConnection::handle_refresh_node(RefreshNodeCallback* callback) { - const ResultResponse::Ptr result = callback->result(); + bool found_host = false; + const Row* row = NULL; + ResultIterator rows(callback->result().get()); - if (result->row_count() == 0) { + while (rows.next() && !found_host) { + row = rows.row(); + if (callback->is_all_peers) { + Address address; + bool is_valid_address = settings_.address_factory->create(row, connection_->host(), &address); + if (is_valid_address && callback->address == address) { + found_host = true; + } + } else { + found_host = true; + } + } + + if (!found_host) { String address_str = callback->address.to_string(); - LOG_ERROR("No row found for host %s in %s's local/peers system table. 
" + LOG_ERROR("No row found for host %s in %s's peers system table. " "%s will be ignored.", address_str.c_str(), address_string().c_str(), address_str.c_str()); return; } - Host::Ptr host(new Host(callback->address)); - if (!callback->is_all_peers) { - host->set(&result->first_row(), token_aware_routing_); - listen_addresses_[callback->address] = - determine_listen_address(callback->address, &result->first_row()); - } else { - ResultIterator rows(result.get()); - bool found_host = false; - while (rows.next()) { - const Row* row = rows.row(); - Address address; - bool is_valid_address = determine_address_for_peer_host( - this->address(), row->get_by_name("peer"), row->get_by_name("rpc_address"), &address); - if (is_valid_address && callback->address == address) { - host->set(row, token_aware_routing_); - listen_addresses_[callback->address] = determine_listen_address(callback->address, row); - found_host = true; + Address address; + if (settings_.address_factory->create(row, connection_->host(), &address)) { + Host::Ptr host(new Host(address)); + host->set(row, settings_.use_token_aware_routing); + listen_addresses_[host->rpc_address()] = determine_listen_address(address, row); + + switch (callback->type) { + case NEW_NODE: + listener_->on_add(host); + break; + case MOVED_NODE: + listener_->on_remove(host->address()); + listener_->on_add(host); break; - } - } - if (!found_host) { - String address_str = callback->address.to_string(); - LOG_ERROR("No row found for host %s in %s's peers system table. " - "%s will be ignored.", - address_str.c_str(), address_string().c_str(), address_str.c_str()); - return; } } - - switch (callback->type) { - case NEW_NODE: - listener_->on_add(host); - break; - case MOVED_NODE: - listener_->on_remove(host->address()); - listener_->on_add(host); - break; - default: - assert(false && "Invalid node refresh type"); - break; - } } void ControlConnection::refresh_keyspace(const StringRef& keyspace_name) { @@ -438,8 +448,9 @@ void ControlConnection::refresh_keyspace(const StringRef& keyspace_name) { LOG_DEBUG("Refreshing keyspace %s", query.c_str()); - if (write_and_flush(RequestCallback::Ptr( - new RefreshKeyspaceCallback(keyspace_name.to_string(), query, this))) < 0) { + RequestCallback::Ptr callback( + new RefreshKeyspaceCallback(keyspace_name.to_string(), query, this)); + if (write_and_flush(callback) < 0) { LOG_ERROR("No more stream available while attempting to refresh keyspace info"); defunct(); } @@ -584,8 +595,9 @@ void ControlConnection::refresh_type(const StringRef& keyspace_name, const Strin LOG_DEBUG("Refreshing type %s", query.c_str()); - if (!write_and_flush(RequestCallback::Ptr(new RefreshTypeCallback( - keyspace_name.to_string(), type_name.to_string(), query, this)))) { + RequestCallback::Ptr callback( + new RefreshTypeCallback(keyspace_name.to_string(), type_name.to_string(), query, this)); + if (write_and_flush(callback) < 0) { LOG_ERROR("No more stream available while attempting to refresh type info"); defunct(); } @@ -644,9 +656,10 @@ void ControlConnection::refresh_function(const StringRef& keyspace_name, request->set(1, CassString(function_name.data(), function_name.size())); request->set(2, signature.get()); - if (!write_and_flush(RequestCallback::Ptr( - new RefreshFunctionCallback(keyspace_name.to_string(), function_name.to_string(), - to_strings(arg_types), is_aggregate, request, this)))) { + RequestCallback::Ptr callback( + new RefreshFunctionCallback(keyspace_name.to_string(), function_name.to_string(), + to_strings(arg_types), 
is_aggregate, request, this)); + if (write_and_flush(callback) < 0) { LOG_ERROR("No more stream available while attempting to refresh function info"); defunct(); } @@ -724,7 +737,7 @@ void ControlConnection::on_event(const EventResponse::Ptr& response) { case CASS_EVENT_SCHEMA_CHANGE: // Only handle keyspace events when using token-aware routing - if (!use_schema_ && response->schema_change_target() != EventResponse::KEYSPACE) { + if (!settings_.use_schema && response->schema_change_target() != EventResponse::KEYSPACE) { return; } diff --git a/src/control_connection.hpp b/src/control_connection.hpp index ce27e894e..d8eeacfa6 100644 --- a/src/control_connection.hpp +++ b/src/control_connection.hpp @@ -18,6 +18,7 @@ #define DATASTAX_INTERNAL_CONTROL_CONNECTION_HPP #include "address.hpp" +#include "address_factory.hpp" #include "config.hpp" #include "connection.hpp" #include "connector.hpp" @@ -141,7 +142,7 @@ class ControlConnectionListener { * A mapping between a host's address and it's listening address. The listening * address is used to look up a peer in the "system.peers" table. */ -class ListenAddressMap : public DenseHashMap { +class ListenAddressMap : public DenseHashMap { public: ListenAddressMap() { set_empty_key(Address::EMPTY_KEY); @@ -149,6 +150,44 @@ class ListenAddressMap : public DenseHashMap { } }; +/** + * Control connection settings. + */ +struct ControlConnectionSettings { + /** + * Constructor. Initialize with default settings. + */ + ControlConnectionSettings(); + + /** + * Constructor. Initialize the settings from a config object. + * + * @param config The config object. + */ + ControlConnectionSettings(const Config& config); + + /** + * The settings for the underlying connection. + */ + ConnectionSettings connection_settings; + + /** + * If true then the control connection will listen for schema events. + */ + bool use_schema; + + /** + * If true then the control connection will listen for keyspace schema + * events. This is needed for the keyspaces replication strategy. + */ + bool use_token_aware_routing; + + /** + * A factory for creating addresses (for the connection process). + */ + AddressFactory::Ptr address_factory; +}; + /** * A control connection. This is a wrapper around a connection that handles * schema, node status, and topology changes. This class handles events @@ -168,16 +207,13 @@ class ControlConnection * * @param connection The wrapped connection. * @param listener A listener to handle events. - * @param use_schema If true then connection will get additional data for - * schema events, otherwise it will ignore those events. - * @param token_aware_routing If true the connection will get additional data - * for keyspace schema changes, otherwise it will ignore those events. + * @param settings The control connection's settings. * @param server_version The version number of the server implementation. * @param dse_server_version The version number of the DSE server implementation. * @param listen_addresses The current state of the listen addresses map. 
*/ ControlConnection(const Connection::Ptr& connection, ControlConnectionListener* listener, - bool use_schema, bool token_aware_routing, const VersionNumber& server_version, + const ControlConnectionSettings& settings, const VersionNumber& server_version, const VersionNumber& dse_server_version, ListenAddressMap listen_addresses); /** @@ -211,6 +247,8 @@ class ControlConnection const String& address_string() const { return connection_->address_string(); } + const Address& resolved_address() const { return connection_->resolved_address(); } + ProtocolVersion protocol_version() const { return connection_->protocol_version(); } const VersionNumber& server_version() { return server_version_; } @@ -256,8 +294,7 @@ class ControlConnection private: Connection::Ptr connection_; - bool use_schema_; - bool token_aware_routing_; + ControlConnectionSettings settings_; VersionNumber server_version_; VersionNumber dse_server_version_; ListenAddressMap listen_addresses_; diff --git a/src/control_connector.cpp b/src/control_connector.cpp index 03b9d7867..66e2c4b2d 100644 --- a/src/control_connector.cpp +++ b/src/control_connector.cpp @@ -80,24 +80,6 @@ class SchemaConnectorRequestCallback : public ChainedRequestCallback { }}} // namespace datastax::internal::core -ControlConnectionSettings::ControlConnectionSettings() - : use_schema(CASS_DEFAULT_USE_SCHEMA) - , token_aware_routing(CASS_DEFAULT_TOKEN_AWARE_ROUTING) {} - -ControlConnectionSettings::ControlConnectionSettings(const Config& config) - : connection_settings(config) - , use_schema(config.use_schema()) - , token_aware_routing(config.token_aware_routing()) {} - -ControlConnector::ControlConnector(const Host::Ptr& host, ProtocolVersion protocol_version, - const Callback& callback) - : connector_( - new Connector(host, protocol_version, bind_callback(&ControlConnector::on_connect, this))) - , callback_(callback) - , error_code_(CONTROL_CONNECTION_OK) - , listener_(NULL) - , metrics_(NULL) {} - ControlConnector* ControlConnector::with_listener(ControlConnectionListener* listener) { listener_ = listener; return this; @@ -116,7 +98,7 @@ ControlConnector* ControlConnector::with_settings(const ControlConnectionSetting void ControlConnector::connect(uv_loop_t* loop) { inc_ref(); int event_types = 0; - if (settings_.use_schema || settings_.token_aware_routing) { + if (settings_.use_schema || settings_.use_token_aware_routing) { event_types = CASS_EVENT_TOPOLOGY_CHANGE | CASS_EVENT_STATUS_CHANGE | CASS_EVENT_SCHEMA_CHANGE; } else { event_types = CASS_EVENT_TOPOLOGY_CHANGE | CASS_EVENT_STATUS_CHANGE; @@ -161,9 +143,8 @@ void ControlConnector::on_success() { } // Transfer ownership of the connection to the control connection. 
- control_connection_.reset(new ControlConnection(connection_, listener_, settings_.use_schema, - settings_.token_aware_routing, server_version_, - dse_server_version_, listen_addresses_)); + control_connection_.reset(new ControlConnection( + connection_, listener_, settings_, server_version_, dse_server_version_, listen_addresses_)); control_connection_->set_listener(listener_); @@ -223,12 +204,12 @@ void ControlConnector::query_hosts() { void ControlConnector::handle_query_hosts(HostsConnectorRequestCallback* callback) { ResultResponse::Ptr local_result(callback->result("local")); + const Host::Ptr& connected_host = connection_->host(); if (local_result && local_result->row_count() > 0) { - const Host::Ptr& host = connection_->host(); - host->set(&local_result->first_row(), settings_.token_aware_routing); - hosts_[host->address()] = host; - server_version_ = host->server_version(); - dse_server_version_ = host->dse_server_version(); + connected_host->set(&local_result->first_row(), settings_.use_token_aware_routing); + hosts_[connected_host->address()] = connected_host; + server_version_ = connected_host->server_version(); + dse_server_version_ = connected_host->dse_server_version(); } else { on_error(CONTROL_CONNECTION_ERROR_HOSTS, "No row found in " + connection_->address_string() + "'s local system table"); @@ -241,19 +222,16 @@ void ControlConnector::handle_query_hosts(HostsConnectorRequestCallback* callbac while (rows.next()) { Address address; const Row* row = rows.row(); - if (!determine_address_for_peer_host(connection_->address(), row->get_by_name("peer"), - row->get_by_name("rpc_address"), &address)) { - continue; + if (settings_.address_factory->create(row, connected_host, &address)) { + Host::Ptr host(new Host(address)); + host->set(rows.row(), settings_.use_token_aware_routing); + listen_addresses_[host->rpc_address()] = determine_listen_address(address, row); + hosts_[host->address()] = host; } - - Host::Ptr host(new Host(address)); - host->set(rows.row(), settings_.token_aware_routing); - listen_addresses_[host->address()] = determine_listen_address(address, row); - hosts_[host->address()] = host; } } - if (settings_.token_aware_routing || settings_.use_schema) { + if (settings_.use_token_aware_routing || settings_.use_schema) { query_schema(); } else { // If we're not using token aware routing or schema we can just finish. diff --git a/src/control_connector.hpp b/src/control_connector.hpp index c1610b842..17c51746a 100644 --- a/src/control_connector.hpp +++ b/src/control_connector.hpp @@ -28,39 +28,6 @@ class HostsConnectorRequestCallback; class Metrics; class SchemaConnectorRequestCallback; -/** - * Control connection settings. - */ -struct ControlConnectionSettings { - /** - * Constructor. Initialize with default settings. - */ - ControlConnectionSettings(); - - /** - * Constructor. Initialize the settings from a config object. - * - * @param config The config object. - */ - ControlConnectionSettings(const Config& config); - - /** - * The settings for the underlying connection. - */ - ConnectionSettings connection_settings; - - /** - * If true then the control connection will listen for schema events. - */ - bool use_schema; - - /** - * If true then the control connection will listen for keyspace schema - * events. This is needed for the keyspaces replication strategy. - */ - bool token_aware_routing; -}; - /** * The initial schema metadata retrieved from the cluster when the control * connection is established. 
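The resolver factory introduced in cluster_metadata_resolver.hpp also allows contact-point resolution to be swapped out. As a minimal sketch, assuming only the `ClusterMetadataResolver` and `ClusterMetadataResolverFactory` interfaces shown above (the `PassThroughResolver` and `PassThroughResolverFactory` names are illustrative and not part of this change), a resolver that treats every contact point as already resolved could look like:

class PassThroughResolver : public ClusterMetadataResolver {
private:
  // Treat every configured contact point as already resolved; no DNS lookups are performed.
  virtual void internal_resolve(uv_loop_t*, const AddressVec& contact_points) {
    resolved_contact_points_ = contact_points;
    callback_(this);
  }
  virtual void internal_cancel() {}
};

class PassThroughResolverFactory : public ClusterMetadataResolverFactory {
public:
  virtual ClusterMetadataResolver::Ptr new_instance(const ClusterSettings&) const {
    return ClusterMetadataResolver::Ptr(new PassThroughResolver());
  }
  virtual const char* name() const { return "PassThrough"; }
};

// Opted into via the new Config setter added in config.hpp, e.g.:
//   config.set_cluster_metadata_resolver_factory(
//       ClusterMetadataResolverFactory::Ptr(new PassThroughResolverFactory()));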
@@ -198,6 +165,8 @@ class ControlConnector return error_code_ == CONTROL_CONNECTION_ERROR_CONNECTION && connector_->is_auth_error(); } + const StringMultimap& supported_options() const { return connector_->supported_options(); } + ControlConnectionError error_code() const { return error_code_; } const String& error_message() const { return error_message_; } CassError ssl_error_code() { return connector_->ssl_error_code(); } diff --git a/src/data_type.cpp b/src/data_type.cpp index 67fb5e496..10dcbdcfb 100644 --- a/src/data_type.cpp +++ b/src/data_type.cpp @@ -72,13 +72,13 @@ CassDataType* cass_data_type_new_from_existing(const CassDataType* data_type) { } CassDataType* cass_data_type_new_tuple(size_t item_count) { - DataType* data_type = new CollectionType(CASS_VALUE_TYPE_TUPLE, item_count); + DataType* data_type = new CollectionType(CASS_VALUE_TYPE_TUPLE, item_count, false); data_type->inc_ref(); return CassDataType::to(data_type); } CassDataType* cass_data_type_new_udt(size_t field_count) { - DataType* data_type = new UserType(field_count); + DataType* data_type = new UserType(field_count, false); data_type->inc_ref(); return CassDataType::to(data_type); } diff --git a/src/data_type.hpp b/src/data_type.hpp index fe829a429..0e4258403 100644 --- a/src/data_type.hpp +++ b/src/data_type.hpp @@ -192,15 +192,15 @@ class CollectionType : public CompositeType { public: typedef SharedRefPtr ConstPtr; - CollectionType(CassValueType collection_type, bool is_frozen) + explicit CollectionType(CassValueType collection_type, bool is_frozen) : CompositeType(collection_type, is_frozen) {} - CollectionType(CassValueType collection_type, size_t types_count, bool is_frozen) + explicit CollectionType(CassValueType collection_type, size_t types_count, bool is_frozen) : CompositeType(collection_type, is_frozen) { types_.reserve(types_count); } - CollectionType(CassValueType collection_type, const DataType::Vec& types, bool is_frozen) + explicit CollectionType(CassValueType collection_type, const DataType::Vec& types, bool is_frozen) : CompositeType(collection_type, types, is_frozen) {} virtual bool equals(const DataType::ConstPtr& data_type) const { @@ -310,19 +310,20 @@ class UserType : public DataType { typedef CaseInsensitiveHashTable::EntryVec FieldVec; - UserType(bool is_frozen) + explicit UserType(bool is_frozen) : DataType(CASS_VALUE_TYPE_UDT, is_frozen) {} - UserType(size_t field_count, bool is_frozen) + explicit UserType(size_t field_count, bool is_frozen) : DataType(CASS_VALUE_TYPE_UDT, is_frozen) , fields_(field_count) {} - UserType(const String& keyspace, const String& type_name, bool is_frozen) + explicit UserType(const String& keyspace, const String& type_name, bool is_frozen) : DataType(CASS_VALUE_TYPE_UDT, is_frozen) , keyspace_(keyspace) , type_name_(type_name) {} - UserType(const String& keyspace, const String& type_name, const FieldVec& fields, bool is_frozen) + explicit UserType(const String& keyspace, const String& type_name, const FieldVec& fields, + bool is_frozen) : DataType(CASS_VALUE_TYPE_UDT, is_frozen) , keyspace_(keyspace) , type_name_(type_name) diff --git a/src/dc_aware_policy.cpp b/src/dc_aware_policy.cpp index d5bedea56..d68bec1a2 100644 --- a/src/dc_aware_policy.cpp +++ b/src/dc_aware_policy.cpp @@ -42,7 +42,12 @@ DCAwarePolicy::DCAwarePolicy(const String& local_dc, size_t used_hosts_per_remot DCAwarePolicy::~DCAwarePolicy() { uv_rwlock_destroy(&available_rwlock_); } -void DCAwarePolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random) { +void 
DCAwarePolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc) { + if (local_dc_.empty()) { // Only override if no local DC was specified. + local_dc_ = local_dc; + } + if (local_dc_.empty() && connected_host && !connected_host->dc().empty()) { LOG_INFO("Using '%s' for the local data center " "(if this is incorrect, please provide the correct data center)", @@ -87,7 +92,7 @@ QueryPlan* DCAwarePolicy::new_query_plan(const String& keyspace, RequestHandler* bool DCAwarePolicy::is_host_up(const Address& address) const { ScopedReadLock rl(&available_rwlock_); - return available_.count(address); + return available_.count(address) > 0; } void DCAwarePolicy::on_host_added(const Host::Ptr& host) { diff --git a/src/dc_aware_policy.hpp b/src/dc_aware_policy.hpp index 559585a54..f76b7307b 100644 --- a/src/dc_aware_policy.hpp +++ b/src/dc_aware_policy.hpp @@ -36,7 +36,8 @@ class DCAwarePolicy : public LoadBalancingPolicy { ~DCAwarePolicy(); - virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random); + virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc); virtual CassHostDistance distance(const Host::Ptr& host) const; diff --git a/src/decoder.cpp b/src/decoder.cpp index 4ad7abc57..310137101 100644 --- a/src/decoder.cpp +++ b/src/decoder.cpp @@ -46,7 +46,7 @@ bool Decoder::decode_inet(Address* output) { } CHECK_REMAINING(address_length, "inet"); - char address[CASS_INET_V6_LENGTH]; + uint8_t address[CASS_INET_V6_LENGTH]; memcpy(address, input_, address_length); input_ += address_length; remaining_ -= address_length; @@ -56,7 +56,8 @@ bool Decoder::decode_inet(Address* output) { input_ = internal::decode_int32(input_, port); remaining_ -= sizeof(int32_t); - return Address::from_inet(address, address_length, port, output); + *output = Address(address, address_length, port); + return output->is_valid_and_resolved(); } bool Decoder::decode_inet(CassInet* output) { @@ -77,7 +78,7 @@ bool Decoder::decode_inet(CassInet* output) { } bool Decoder::as_inet(const int address_length, CassInet* output) const { - output->address_length = address_length; + output->address_length = static_cast(address_length); if (output->address_length > CASS_INET_V6_LENGTH) { LOG_ERROR("Invalid inet address length of %d bytes", output->address_length); return false; diff --git a/src/decoder.hpp b/src/decoder.hpp index 3bfe2bc54..4d2de54c5 100644 --- a/src/decoder.hpp +++ b/src/decoder.hpp @@ -302,7 +302,8 @@ class Decoder { inline bool as_inet(const int address_length, const int port, Address* output) const { CassInet inet; if (!as_inet(address_length, &inet)) return false; - return Address::from_inet(&inet.address, inet.address_length, port, output); + *output = Address(inet.address, inet.address_length, port); + return output->is_valid_and_resolved(); } inline bool decode_string_map(Map& map) { diff --git a/src/encode.cpp b/src/encode.cpp index 0d872a51f..a585b5ea6 100644 --- a/src/encode.cpp +++ b/src/encode.cpp @@ -23,7 +23,7 @@ namespace datastax { namespace internal { namespace core { static char* encode_vint(char* output, uint64_t value, size_t value_size) { if (value_size == 1) { // This is just a one byte value; write it and get out. 
- *output = value; + *output = static_cast(value); return output + 1; } diff --git a/src/execute_request.cpp b/src/execute_request.cpp index f9919d068..33b642ef3 100644 --- a/src/execute_request.cpp +++ b/src/execute_request.cpp @@ -40,7 +40,7 @@ int ExecuteRequest::encode(ProtocolVersion version, RequestCallback* callback, length += bufs->back().size(); } } - length += encode_begin(version, elements().size(), callback, bufs); + length += encode_begin(version, static_cast(elements().size()), callback, bufs); int32_t result = encode_values(version, callback, bufs); if (result < 0) return result; length += result; diff --git a/src/host.cpp b/src/host.cpp index 2a566acbd..1229c1ead 100644 --- a/src/host.cpp +++ b/src/host.cpp @@ -140,6 +140,24 @@ void Host::set(const Row* row, bool use_tokens) { } } } + + v = row->get_by_name("rpc_address"); + if (v && !v->is_null()) { + if (!v->decoder().as_inet(v->size(), address_.port(), &rpc_address_)) { + LOG_WARN("Invalid address format for `rpc_address`"); + } + if (Address("0.0.0.0", 0).equals(rpc_address_, false) || + Address("::", 0).equals(rpc_address_, false)) { + LOG_WARN("Found host with 'bind any' for rpc_address; using listen_address (%s) to contact " + "instead. " + "If this is incorrect you should configure a specific interface for rpc_address on " + "the server.", + address_string_.c_str()); + } + } else { + LOG_WARN("No rpc_address for host %s in system.local or system.peers.", + address_string_.c_str()); + } } ExternalHostListener::ExternalHostListener(const CassHostListenerCallback callback, void* data) diff --git a/src/host.hpp b/src/host.hpp index 0bfc91daa..28665aeed 100644 --- a/src/host.hpp +++ b/src/host.hpp @@ -96,6 +96,7 @@ class Host : public RefCounted { Host(const Address& address) : address_(address) + , rpc_address_(address) , rack_id_(0) , dc_id_(0) , address_string_(address.to_string()) @@ -105,6 +106,8 @@ class Host : public RefCounted { const Address& address() const { return address_; } const String& address_string() const { return address_string_; } + const Address& rpc_address() const { return rpc_address_; } + void set(const Row* row, bool use_tokens); const String& rack() const { return rack_; } @@ -197,6 +200,7 @@ class Host : public RefCounted { private: Address address_; + Address rpc_address_; uint32_t rack_id_; uint32_t dc_id_; String address_string_; diff --git a/src/host_targeting_policy.cpp b/src/host_targeting_policy.cpp index 395337c84..eac6fa897 100644 --- a/src/host_targeting_policy.cpp +++ b/src/host_targeting_policy.cpp @@ -21,11 +21,11 @@ using namespace datastax::internal; using namespace datastax::internal::core; void HostTargetingPolicy::init(const SharedRefPtr& connected_host, const core::HostMap& hosts, - Random* random) { + Random* random, const String& local_dc) { for (core::HostMap::const_iterator it = hosts.begin(), end = hosts.end(); it != end; ++it) { hosts_[it->first] = it->second; } - ChainedLoadBalancingPolicy::init(connected_host, hosts, random); + ChainedLoadBalancingPolicy::init(connected_host, hosts, random, local_dc); } QueryPlan* HostTargetingPolicy::new_query_plan(const String& keyspace, diff --git a/src/host_targeting_policy.hpp b/src/host_targeting_policy.hpp index fe8616f98..81a995da4 100644 --- a/src/host_targeting_policy.hpp +++ b/src/host_targeting_policy.hpp @@ -29,7 +29,8 @@ class HostTargetingPolicy : public ChainedLoadBalancingPolicy { HostTargetingPolicy(LoadBalancingPolicy* child_policy) : ChainedLoadBalancingPolicy(child_policy) {} - virtual void init(const 
SharedRefPtr& connected_host, const HostMap& hosts, Random* random); + virtual void init(const SharedRefPtr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc); virtual QueryPlan* new_query_plan(const String& keyspace, RequestHandler* request_handler, const TokenMap* token_map); @@ -58,7 +59,7 @@ class HostTargetingPolicy : public ChainedLoadBalancingPolicy { }; private: - class HostMap : public DenseHashMap { + class HostMap : public DenseHashMap { public: HostMap() { set_empty_key(Address::EMPTY_KEY); diff --git a/src/http_client.cpp b/src/http_client.cpp new file mode 100644 index 000000000..48cfe48f4 --- /dev/null +++ b/src/http_client.cpp @@ -0,0 +1,221 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "http_client.hpp" + +#include "constants.hpp" +#include "driver_info.hpp" + +using namespace datastax; +using namespace datastax::internal::core; + +namespace datastax { namespace internal { namespace core { + +class HttpClientSocketHandler : public SocketHandler { +public: + HttpClientSocketHandler(HttpClient* client) + : client_(client) {} + + virtual void on_read(Socket* socket, ssize_t nread, const uv_buf_t* buf) { + client_->on_read(buf->base, nread); + free_buffer(buf); + } + + virtual void on_write(Socket* socket, int status, SocketRequest* request) { delete request; } + + virtual void on_close() { client_->finish(); } + +private: + HttpClient* client_; +}; + +class HttpClientSslSocketHandler : public SslSocketHandler { +public: + HttpClientSslSocketHandler(SslSession* ssl_session, HttpClient* client) + : SslSocketHandler(ssl_session) + , client_(client) {} + + virtual void on_ssl_read(Socket* socket, char* buf, size_t size) { client_->on_read(buf, size); } + + virtual void on_write(Socket* socket, int status, SocketRequest* request) { delete request; } + + virtual void on_close() { client_->finish(); } + +private: + HttpClient* client_; +}; + +}}} // namespace datastax::internal::core + +HttpClient::HttpClient(const Address& address, const String& path, const Callback& callback) + : error_code_(HTTP_CLIENT_OK) + , address_(address) + , path_(path) + , callback_(callback) + , socket_connector_( + new SocketConnector(address, bind_callback(&HttpClient::on_socket_connect, this))) + , request_timeout_ms_(CASS_DEFAULT_CONNECT_TIMEOUT_MS) + , status_code_(0) { + http_parser_init(&parser_, HTTP_RESPONSE); + http_parser_settings_init(&parser_settings_); + + parser_.data = this; + parser_settings_.on_status = on_status; + parser_settings_.on_header_field = on_header_field; + parser_settings_.on_header_value = on_header_value; + parser_settings_.on_body = on_body; + parser_settings_.on_message_complete = on_message_complete; +} + +HttpClient* HttpClient::with_settings(const SocketSettings& settings) { + socket_connector_->with_settings(settings); + return this; +} + +HttpClient* HttpClient::with_request_timeout_ms(uint64_t request_timeout_ms) { + request_timeout_ms_ = request_timeout_ms; + return this; +} + +void 
HttpClient::request(uv_loop_t* loop) { + inc_ref(); + socket_connector_->connect(loop); + if (request_timeout_ms_ > 0) { + request_timer_.start(loop, request_timeout_ms_, bind_callback(&HttpClient::on_timeout, this)); + } +} + +void HttpClient::cancel() { + error_code_ = HTTP_CLIENT_CANCELED; + socket_connector_->cancel(); + if (socket_) socket_->close(); + request_timer_.stop(); +} + +void HttpClient::on_socket_connect(SocketConnector* connector) { + if (connector->is_ok()) { + socket_ = connector->release_socket(); + if (connector->ssl_session()) { + socket_->set_handler( + new HttpClientSslSocketHandler(connector->ssl_session().release(), this)); + } else { + socket_->set_handler(new HttpClientSocketHandler(this)); + } + + OStringStream ss; + ss << "GET " << path_ << " HTTP/1.0\r\n" // HTTP/1.0 ensures chunked responses are not sent + << "Host: " << socket_->address().to_string(true) << "\r\n" + << "User-Agent: cpp-driver/" << driver_version() << "\r\nAccept: */*\r\n\r\n"; + + String request = ss.str(); + socket_->write_and_flush(new BufferSocketRequest(Buffer(request.c_str(), request.size()))); + } else { + if (!connector->is_canceled()) { + error_code_ = HTTP_CLIENT_ERROR_SOCKET; + error_message_ = "Failed to establish HTTP connection: " + connector->error_message(); + } + finish(); + } +} + +void HttpClient::on_read(char* buf, ssize_t nread) { + if (is_canceled()) return; + + if (nread > 0) { + size_t parsed = http_parser_execute(&parser_, &parser_settings_, buf, nread); + if (parsed < static_cast(nread)) { + error_code_ = HTTP_CLIENT_ERROR_PARSING; + OStringStream ss; + enum http_errno err = HTTP_PARSER_ERRNO(&parser_); + ss << "HTTP parsing error (" << http_errno_name(err) << "):" << http_errno_description(err); + error_message_ = ss.str(); + socket_->close(); + } + } else if (is_ok() && status_code_ == 0) { // Make sure there wasn't an existing error + error_code_ = HTTP_CLIENT_ERROR_CLOSED; + error_message_ = "HTTP connection prematurely closed"; + } +} + +void HttpClient::on_timeout(Timer* timer) { + error_code_ = HTTP_CLIENT_ERROR_TIMEOUT; + OStringStream ss; + ss << "HTTP request timed out after " << request_timeout_ms_ << " ms"; + error_message_ = ss.str(); + socket_connector_->cancel(); + if (socket_) socket_->close(); +} + +int HttpClient::on_status(http_parser* parser, const char* buf, size_t len) { + HttpClient* self = static_cast(parser->data); + return self->handle_status(parser->status_code); +} + +int HttpClient::handle_status(unsigned status_code) { + if (status_code < 200 || status_code > 299) { + error_code_ = HTTP_CLIENT_ERROR_HTTP_STATUS; + } + status_code_ = status_code; + return 0; +} + +int HttpClient::on_header_field(http_parser* parser, const char* buf, size_t len) { + HttpClient* self = static_cast(parser->data); + return self->handle_header_field(buf, len); +} + +int HttpClient::handle_header_field(const char* buf, size_t len) { + current_header_.assign(buf, len); + return 0; +} + +int HttpClient::on_header_value(http_parser* parser, const char* buf, size_t len) { + HttpClient* self = static_cast(parser->data); + return self->handle_header_value(buf, len); +} + +int HttpClient::handle_header_value(const char* buf, size_t len) { + if (StringRef(current_header_).iequals("content-type")) { + content_type_.assign(buf, len); + } + return 0; +} + +int HttpClient::on_body(http_parser* parser, const char* buf, size_t len) { + HttpClient* self = static_cast(parser->data); + return self->handle_body(buf, len); +} + +int HttpClient::handle_body(const char* buf, 
size_t len) { + response_body_.assign(buf, len); + return 0; +} + +int HttpClient::on_message_complete(http_parser* parser) { + HttpClient* self = static_cast(parser->data); + return self->handle_message_complete(); +} + +int HttpClient::handle_message_complete() { + socket_->close(); + return 0; +} + +void HttpClient::finish() { + request_timer_.stop(); + if (callback_) callback_(this); + dec_ref(); +} diff --git a/src/http_client.hpp b/src/http_client.hpp new file mode 100644 index 000000000..c1a80749f --- /dev/null +++ b/src/http_client.hpp @@ -0,0 +1,106 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef DATASTAX_INTERNAL_HTTP_CLIENT_HPP +#define DATASTAX_INTERNAL_HTTP_CLIENT_HPP + +#include "address.hpp" +#include "callback.hpp" +#include "cloud_secure_connection_config.hpp" +#include "event_loop.hpp" +#include "http_parser.h" +#include "ref_counted.hpp" +#include "socket_connector.hpp" +#include "string.hpp" + +namespace datastax { namespace internal { namespace core { + +class HttpClient : public RefCounted { +public: + typedef SharedRefPtr Ptr; + typedef internal::Callback Callback; + + enum HttpClientError { + HTTP_CLIENT_OK, + HTTP_CLIENT_CANCELED, + HTTP_CLIENT_ERROR_SOCKET, + HTTP_CLIENT_ERROR_PARSING, + HTTP_CLIENT_ERROR_HTTP_STATUS, + HTTP_CLIENT_ERROR_TIMEOUT, + HTTP_CLIENT_ERROR_CLOSED + }; + + HttpClient(const Address& address, const String& path, const Callback& callback); + + HttpClient* with_settings(const SocketSettings& settings); + + HttpClient* with_request_timeout_ms(uint64_t request_timeout_ms); + + bool is_ok() const { return error_code_ == HTTP_CLIENT_OK; } + bool is_error_status_code() const { return error_code_ == HTTP_CLIENT_ERROR_HTTP_STATUS; } + bool is_canceled() const { return error_code_ == HTTP_CLIENT_CANCELED; } + HttpClientError error_code() const { return error_code_; } + String error_message() const { return error_message_; } + unsigned status_code() const { return status_code_; } + const String& content_type() const { return content_type_; } + const String& response_body() const { return response_body_; } + + void request(uv_loop_t* loop); + void cancel(); + +private: + friend class HttpClientSocketHandler; + friend class HttpClientSslSocketHandler; + +private: + void on_socket_connect(SocketConnector* connector); + void on_read(char* buf, ssize_t nread); + void on_timeout(Timer* timer); + + static int on_status(http_parser* parser, const char* buf, size_t len); + int handle_status(unsigned status_code); + static int on_header_field(http_parser* parser, const char* buf, size_t len); + int handle_header_field(const char* buf, size_t len); + static int on_header_value(http_parser* parser, const char* buf, size_t len); + int handle_header_value(const char* buf, size_t len); + static int on_body(http_parser* parser, const char* buf, size_t len); + int handle_body(const char* buf, size_t len); + static int on_message_complete(http_parser* parser); + int handle_message_complete(); + + void finish(); + +private: + 
HttpClientError error_code_; + String error_message_; + Address address_; + String path_; + Callback callback_; + SocketConnector::Ptr socket_connector_; + Socket::Ptr socket_; + Timer request_timer_; + uint64_t request_timeout_ms_; + http_parser parser_; + http_parser_settings parser_settings_; + String current_header_; + unsigned status_code_; + String content_type_; + String response_body_; +}; + +}}} // namespace datastax::internal::core + +#endif diff --git a/src/json.hpp b/src/json.hpp index 665cdf583..620536e55 100644 --- a/src/json.hpp +++ b/src/json.hpp @@ -73,10 +73,12 @@ class Allocator { typedef datastax::rapidjson::UTF8<> UTF8; typedef datastax::rapidjson::MemoryPoolAllocator MemoryPoolAllocator; +typedef datastax::rapidjson::MemoryStream MemoryStream; typedef datastax::rapidjson::GenericDocument Document; typedef datastax::rapidjson::GenericValue Value; typedef datastax::rapidjson::GenericStringBuffer StringBuffer; +typedef datastax::rapidjson::AutoUTFInputStream AutoUTFMemoryInputStream; template reserve(hosts.size()); std::transform(hosts.begin(), hosts.end(), std::back_inserter(*hosts_), GetHost()); for (HostMap::const_iterator i = hosts.begin(), end = hosts.end(); i != end; ++i) { i->second->enable_latency_tracking(settings_.scale_ns, settings_.min_measured); } - ChainedLoadBalancingPolicy::init(connected_host, hosts, random); + ChainedLoadBalancingPolicy::init(connected_host, hosts, random, local_dc); } void LatencyAwarePolicy::register_handles(uv_loop_t* loop) { start_timer(loop); } diff --git a/src/latency_aware_policy.hpp b/src/latency_aware_policy.hpp index 3506d8bf2..178752a4a 100644 --- a/src/latency_aware_policy.hpp +++ b/src/latency_aware_policy.hpp @@ -50,7 +50,8 @@ class LatencyAwarePolicy : public ChainedLoadBalancingPolicy { virtual ~LatencyAwarePolicy() {} - virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random); + virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc); virtual void register_handles(uv_loop_t* loop); virtual void close_handles(); diff --git a/src/list_policy.cpp b/src/list_policy.cpp index 85335b20f..8e64c15e6 100644 --- a/src/list_policy.cpp +++ b/src/list_policy.cpp @@ -22,7 +22,8 @@ using namespace datastax; using namespace datastax::internal; using namespace datastax::internal::core; -void ListPolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random) { +void ListPolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc) { HostMap valid_hosts; for (HostMap::const_iterator i = hosts.begin(), end = hosts.end(); i != end; ++i) { const Host::Ptr& host = i->second; @@ -35,7 +36,7 @@ void ListPolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Ran LOG_ERROR("No valid hosts available for list policy"); } - ChainedLoadBalancingPolicy::init(connected_host, valid_hosts, random); + ChainedLoadBalancingPolicy::init(connected_host, valid_hosts, random, local_dc); } CassHostDistance ListPolicy::distance(const Host::Ptr& host) const { diff --git a/src/list_policy.hpp b/src/list_policy.hpp index 72de4d972..e52e4b8b8 100644 --- a/src/list_policy.hpp +++ b/src/list_policy.hpp @@ -30,7 +30,8 @@ class ListPolicy : public ChainedLoadBalancingPolicy { virtual ~ListPolicy() {} - virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random); + virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& 
local_dc); virtual CassHostDistance distance(const Host::Ptr& host) const; diff --git a/src/load_balancing.hpp b/src/load_balancing.hpp index 38e1c8faa..ba60928a9 100644 --- a/src/load_balancing.hpp +++ b/src/load_balancing.hpp @@ -86,7 +86,8 @@ class LoadBalancingPolicy : public RefCounted { virtual ~LoadBalancingPolicy() {} - virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random) = 0; + virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc) = 0; virtual void register_handles(uv_loop_t* loop) {} virtual void close_handles() {} @@ -122,8 +123,9 @@ class ChainedLoadBalancingPolicy : public LoadBalancingPolicy { virtual ~ChainedLoadBalancingPolicy() {} - virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random) { - return child_policy_->init(connected_host, hosts, random); + virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc) { + return child_policy_->init(connected_host, hosts, random, local_dc); } virtual const LoadBalancingPolicy::Ptr& child_policy() const { return child_policy_; } diff --git a/src/macros.hpp b/src/macros.hpp index 4b7e7bee1..c625f84a8 100644 --- a/src/macros.hpp +++ b/src/macros.hpp @@ -18,6 +18,7 @@ #define DATASTAX_INTERNAL_MACROS_HPP #include +#include #define SAFE_STRLEN(s) ((s) ? strlen(s) : 0) diff --git a/src/md5.cpp b/src/md5.cpp index 207b3cfb2..724ca2ced 100644 --- a/src/md5.cpp +++ b/src/md5.cpp @@ -116,33 +116,33 @@ void Md5::final(uint8_t* result) { memset(&buffer_[used], 0, free - 8); lo_ <<= 3; - buffer_[56] = lo_; - buffer_[57] = lo_ >> 8; - buffer_[58] = lo_ >> 16; - buffer_[59] = lo_ >> 24; - buffer_[60] = hi_; - buffer_[61] = hi_ >> 8; - buffer_[62] = hi_ >> 16; - buffer_[63] = hi_ >> 24; + buffer_[56] = static_cast(lo_); + buffer_[57] = static_cast(lo_ >> 8); + buffer_[58] = static_cast(lo_ >> 16); + buffer_[59] = static_cast(lo_ >> 24); + buffer_[60] = static_cast(hi_); + buffer_[61] = static_cast(hi_ >> 8); + buffer_[62] = static_cast(hi_ >> 16); + buffer_[63] = static_cast(hi_ >> 24); body(buffer_, 64); - result[0] = a_; - result[1] = a_ >> 8; - result[2] = a_ >> 16; - result[3] = a_ >> 24; - result[4] = b_; - result[5] = b_ >> 8; - result[6] = b_ >> 16; - result[7] = b_ >> 24; - result[8] = c_; - result[9] = c_ >> 8; - result[10] = c_ >> 16; - result[11] = c_ >> 24; - result[12] = d_; - result[13] = d_ >> 8; - result[14] = d_ >> 16; - result[15] = d_ >> 24; + result[0] = static_cast(a_); + result[1] = static_cast(a_ >> 8); + result[2] = static_cast(a_ >> 16); + result[3] = static_cast(a_ >> 24); + result[4] = static_cast(b_); + result[5] = static_cast(b_ >> 8); + result[6] = static_cast(b_ >> 16); + result[7] = static_cast(b_ >> 24); + result[8] = static_cast(c_); + result[9] = static_cast(c_ >> 8); + result[10] = static_cast(c_ >> 16); + result[11] = static_cast(c_ >> 24); + result[12] = static_cast(d_); + result[13] = static_cast(d_ >> 8); + result[14] = static_cast(d_ >> 16); + result[15] = static_cast(d_ >> 24); memset(this, 0, sizeof(Md5)); } diff --git a/src/name_resolver.hpp b/src/name_resolver.hpp index a234f924e..002b16adc 100644 --- a/src/name_resolver.hpp +++ b/src/name_resolver.hpp @@ -72,8 +72,8 @@ class NameResolver : public RefCounted { timer_.start(loop, timeout, bind_callback(&NameResolver::on_timeout, this)); } - int rc = - uv_getnameinfo(loop, &req_, on_resolve, static_cast(address_).addr(), flags); + Address::SocketStorage storage; + int rc = 
uv_getnameinfo(loop, &req_, on_resolve, address_.to_sockaddr(&storage), flags); if (rc != 0) { status_ = FAILED_BAD_PARAM; diff --git a/src/pooled_connection.cpp b/src/pooled_connection.cpp index bc46e3c25..edf0ff5b4 100644 --- a/src/pooled_connection.cpp +++ b/src/pooled_connection.cpp @@ -88,7 +88,7 @@ void ChainedSetKeyspaceCallback::on_internal_timeout() { chained_callback_->on_r void ChainedSetKeyspaceCallback::on_result_response(ResponseMessage* response) { ResultResponse* result = static_cast(response->response_body().get()); if (result->kind() == CASS_RESULT_KIND_SET_KEYSPACE) { - if (!connection_->write_and_flush(chained_callback_)) { + if (connection_->write_and_flush(chained_callback_) < 0) { // Try on the same host but a different connection chained_callback_->on_retry_current_host(); } @@ -107,8 +107,8 @@ PooledConnection::PooledConnection(ConnectionPool* pool, const Connection::Ptr& connection_->set_listener(this); } -bool PooledConnection::write(RequestCallback* callback) { - bool result = false; +int32_t PooledConnection::write(RequestCallback* callback) { + int32_t result; const String& keyspace(pool_->keyspace()); if (keyspace != connection_->keyspace()) { LOG_DEBUG("Setting keyspace %s on connection(%p) pool(%p)", keyspace.c_str(), @@ -119,9 +119,10 @@ bool PooledConnection::write(RequestCallback* callback) { result = connection_->write(RequestCallback::Ptr(callback)); } - if (result) { + if (result > 0) { pool_->requires_flush(this, ConnectionPool::Protected()); } + return result; } @@ -142,6 +143,8 @@ int PooledConnection::inflight_request_count() const { return connection_->inflight_request_count(); } +bool PooledConnection::is_closing() const { return connection_->is_closing(); } + void PooledConnection::on_read() { if (event_loop_) { event_loop_->maybe_start_io_time(); diff --git a/src/pooled_connection.hpp b/src/pooled_connection.hpp index 272dac5e9..2a902da2b 100644 --- a/src/pooled_connection.hpp +++ b/src/pooled_connection.hpp @@ -51,10 +51,9 @@ class PooledConnection * the connection pool manager flushes the request. * * @param callback A request callback that handles the request. - * @return Returns true if the request was written, otherwise, an error - * occurred. + * @return The number of bytes written, or negative if an error occurred. */ - bool write(RequestCallback* callback); + int32_t write(RequestCallback* callback); /** * Flush pending writes. @@ -73,6 +72,13 @@ class PooledConnection */ int inflight_request_count() const; + /** + * Determine if the connection is closing. + * + * @return Returns true if closing. 
+ */ + bool is_closing() const; + public: const String& keyspace() const { return connection_->keyspace(); } // Test only diff --git a/src/prepare_host_handler.cpp b/src/prepare_host_handler.cpp index 0b0c4bd2a..1345ae8d5 100644 --- a/src/prepare_host_handler.cpp +++ b/src/prepare_host_handler.cpp @@ -111,8 +111,8 @@ void PrepareHostHandler::prepare_next() { // Set the keyspace in case per request keyspaces are supported prepare_request->set_keyspace(current_keyspace_); - if (!connection_->write( - PrepareCallback::Ptr(new PrepareCallback(prepare_request, Ptr(this))))) { + PrepareCallback::Ptr callback(new PrepareCallback(prepare_request, Ptr(this))); + if (connection_->write(callback) < 0) { LOG_WARN("Failed to write prepare request while preparing all queries on host %s", host_->address_string().c_str()); close(); @@ -134,8 +134,8 @@ bool PrepareHostHandler::check_and_set_keyspace() { const String& keyspace((*current_entry_it_)->keyspace()); if (keyspace != current_keyspace_) { - if (!connection_->write_and_flush( - PrepareCallback::Ptr(new SetKeyspaceCallback(keyspace, Ptr(this))))) { + PrepareCallback::Ptr callback(new SetKeyspaceCallback(keyspace, Ptr(this))); + if (connection_->write_and_flush(callback) < 0) { LOG_WARN("Failed to write \"USE\" keyspace request while preparing all queries on host %s", host_->address_string().c_str()); close(); diff --git a/src/prepare_request.cpp b/src/prepare_request.cpp index d9a4eaf06..72b740057 100644 --- a/src/prepare_request.cpp +++ b/src/prepare_request.cpp @@ -45,7 +45,7 @@ int PrepareRequest::encode(ProtocolVersion version, RequestCallback* callback, size_t pos = buf.encode_int32(0, flags); if (!keyspace().empty()) { - buf.encode_string(pos, keyspace().data(), keyspace().size()); + buf.encode_string(pos, keyspace().data(), static_cast(keyspace().size())); } } return length; diff --git a/src/prepared.hpp b/src/prepared.hpp index fdbcc00ed..3657aaa1c 100644 --- a/src/prepared.hpp +++ b/src/prepared.hpp @@ -69,7 +69,8 @@ class PreparedMetadata { , keyspace_(keyspace) , result_metadata_id_(sizeof(uint16_t) + result_metadata_id.size()) , result_(result) { - result_metadata_id_.encode_string(0, result_metadata_id.data(), result_metadata_id.size()); + result_metadata_id_.encode_string(0, result_metadata_id.data(), + static_cast(result_metadata_id.size())); } const String& query() const { return query_; } diff --git a/src/protocol.hpp b/src/protocol.hpp index 58e747880..e341b0063 100644 --- a/src/protocol.hpp +++ b/src/protocol.hpp @@ -83,6 +83,13 @@ class ProtocolVersion { */ bool is_valid() const; + /** + * Check to see if the protocol version's value is DSE. + * + * @return true if DSE, otherwise false; + */ + bool is_dse() const; + /** * Check to see if the protocol version is a beta version. 
* diff --git a/src/query_request.cpp b/src/query_request.cpp index c120b4a23..186650590 100644 --- a/src/query_request.cpp +++ b/src/query_request.cpp @@ -29,10 +29,10 @@ int QueryRequest::encode(ProtocolVersion version, RequestCallback* callback, int32_t result; int32_t length = encode_query_or_id(bufs); if (has_names_for_values()) { - length += encode_begin(version, value_names_->size(), callback, bufs); + length += encode_begin(version, static_cast(value_names_->size()), callback, bufs); result = encode_values_with_names(version, callback, bufs); } else { - length += encode_begin(version, elements().size(), callback, bufs); + length += encode_begin(version, static_cast(elements().size()), callback, bufs); result = encode_values(version, callback, bufs); } if (result < 0) return result; diff --git a/src/query_request.hpp b/src/query_request.hpp index 6cd662362..b45920ec9 100644 --- a/src/query_request.hpp +++ b/src/query_request.hpp @@ -50,7 +50,7 @@ class QueryRequest : public Statement { ValueName(const String& name) : name(name) , buf(sizeof(uint16_t) + name.size()) { - buf.encode_string(0, name.data(), name.size()); + buf.encode_string(0, name.data(), static_cast(name.size())); } String name; diff --git a/src/request.cpp b/src/request.cpp index 0b9d23c82..3442132e8 100644 --- a/src/request.cpp +++ b/src/request.cpp @@ -54,7 +54,7 @@ void cass_custom_payload_free(CassCustomPayload* payload) { payload->dec_ref(); void CustomPayload::set(const char* name, size_t name_length, const uint8_t* value, size_t value_size) { Buffer buf(sizeof(uint16_t) + name_length + sizeof(int32_t) + value_size); - size_t pos = buf.encode_string(0, name, name_length); + size_t pos = buf.encode_string(0, name, static_cast(name_length)); buf.encode_bytes(pos, reinterpret_cast(value), value_size); items_[String(name, name_length)] = buf; } diff --git a/src/request.hpp b/src/request.hpp index 9ec31b7a3..5a4c28797 100644 --- a/src/request.hpp +++ b/src/request.hpp @@ -81,7 +81,8 @@ class Request : public RefCounted { REQUEST_ERROR_UNSUPPORTED_PROTOCOL = SocketRequest::SOCKET_REQUEST_ERROR_LAST_ENTRY, REQUEST_ERROR_BATCH_WITH_NAMED_VALUES, REQUEST_ERROR_PARAMETER_UNSET, - REQUEST_ERROR_NO_AVAILABLE_STREAM_IDS + REQUEST_ERROR_NO_AVAILABLE_STREAM_IDS, + REQUEST_ERROR_NO_DATA_WRITTEN }; Request(uint8_t opcode) diff --git a/src/request_callback.cpp b/src/request_callback.cpp index 7a1c38959..402f4d2ea 100644 --- a/src/request_callback.cpp +++ b/src/request_callback.cpp @@ -58,6 +58,7 @@ bool RequestCallback::skip_metadata() const { int32_t RequestCallback::encode(BufferVec* bufs) { const ProtocolVersion version = protocol_version_; if (version < CASS_LOWEST_SUPPORTED_PROTOCOL_VERSION) { + on_error(CASS_ERROR_LIB_MESSAGE_ENCODE, "Operation unsupported by this protocol version"); return Request::REQUEST_ERROR_UNSUPPORTED_PROTOCOL; } @@ -85,10 +86,10 @@ int32_t RequestCallback::encode(BufferVec* bufs) { Buffer buf(header_size); size_t pos = 0; - pos = buf.encode_byte(pos, version.value()); - pos = buf.encode_byte(pos, flags); + pos = buf.encode_byte(pos, static_cast(version.value())); + pos = buf.encode_byte(pos, static_cast(flags)); - pos = buf.encode_int16(pos, stream_); + pos = buf.encode_int16(pos, static_cast(stream_)); pos = buf.encode_byte(pos, req->opcode()); buf.encode_int32(pos, length); @@ -245,8 +246,7 @@ ResultResponse::Ptr ChainedRequestCallback::result(const String& key) const { void ChainedRequestCallback::on_internal_write(Connection* connection) { if (chain_) { - int32_t result = 
connection->write_and_flush(chain_); - if (result == Request::REQUEST_ERROR_NO_AVAILABLE_STREAM_IDS) { + if (connection->write_and_flush(chain_) < 0) { on_error(CASS_ERROR_LIB_NO_STREAMS, "No streams available when attempting to write chained request"); } diff --git a/src/request_handler.cpp b/src/request_handler.cpp index 281c59ea9..4c46114ed 100644 --- a/src/request_handler.cpp +++ b/src/request_handler.cpp @@ -36,6 +36,20 @@ using namespace datastax; using namespace datastax::internal; using namespace datastax::internal::core; +static String to_hex(const String& byte_id) { + static const char half_byte_to_hex[] = { '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; + OStringStream ss; + + const char* data = byte_id.data(); + for (size_t i = 0; i < byte_id.length(); ++i) { + uint8_t byte = static_cast(data[i]); + ss << half_byte_to_hex[(byte >> 4) & 0x0F]; + ss << half_byte_to_hex[byte & 0x0F]; + } + return ss.str(); +} + class SingleHostQueryPlan : public QueryPlan { public: SingleHostQueryPlan(const Address& address) @@ -53,7 +67,7 @@ class SingleHostQueryPlan : public QueryPlan { class PrepareCallback : public SimpleRequestCallback { public: - PrepareCallback(const String& query, RequestExecution* request_execution); + PrepareCallback(const String& query, const String& id, RequestExecution* request_execution); private: class PrepareRequest : public core::PrepareRequest { @@ -72,21 +86,29 @@ class PrepareCallback : public SimpleRequestCallback { private: RequestExecution::Ptr request_execution_; + String id_; }; -PrepareCallback::PrepareCallback(const String& query, RequestExecution* request_execution) +PrepareCallback::PrepareCallback(const String& query, const String& id, + RequestExecution* request_execution) : SimpleRequestCallback( Request::ConstPtr(new PrepareRequest(query, request_execution->request()->keyspace(), request_execution->request_timeout_ms()))) - , request_execution_(request_execution) {} + , request_execution_(request_execution) + , id_(id) {} void PrepareCallback::on_internal_set(ResponseMessage* response) { switch (response->opcode()) { case CQL_OPCODE_RESULT: { ResultResponse* result = static_cast(response->response_body().get()); if (result->kind() == CASS_RESULT_KIND_PREPARED) { - request_execution_->notify_result_metadata_changed(request(), result); - request_execution_->on_retry_current_host(); + String result_id = result->prepared_id().to_string(); + if (id_ != result_id) { + request_execution_->notify_prepared_id_mismatch(id_, result_id); + } else { + request_execution_->notify_result_metadata_changed(request(), result); + request_execution_->on_retry_current_host(); + } } else { request_execution_->on_retry_next_host(); } @@ -306,18 +328,54 @@ void RequestHandler::internal_retry(RequestExecution* request_execution) { return; } - bool is_successful = false; - while (request_execution->current_host()) { + bool is_done = false; + while (!is_done && request_execution->current_host()) { PooledConnection::Ptr connection = manager_->find_least_busy(request_execution->current_host()->address()); - if (connection && connection->write(request_execution)) { - is_successful = true; - break; + if (connection) { + int32_t result = connection->write(request_execution); + + if (result > 0) { + is_done = true; + } else { + switch (result) { + case SocketRequest::SOCKET_REQUEST_ERROR_CLOSED: + // This should never happen, but retry with next host if it does. 
+ request_execution->next_host(); + break; + + case SocketRequest::SOCKET_REQUEST_ERROR_NO_HANDLER: + set_error(CASS_ERROR_LIB_WRITE_ERROR, + "Socket is not properly configured with a handler"); + is_done = true; + break; + + case Request::REQUEST_ERROR_NO_AVAILABLE_STREAM_IDS: + // Retry with next host + request_execution->next_host(); + break; + + case Request::REQUEST_ERROR_BATCH_WITH_NAMED_VALUES: + case Request::REQUEST_ERROR_PARAMETER_UNSET: + case Request::REQUEST_ERROR_UNSUPPORTED_PROTOCOL: + case Request::REQUEST_ERROR_NO_DATA_WRITTEN: + // Already handled with a specific error. + is_done = true; + break; + + default: + set_error(CASS_ERROR_LIB_WRITE_ERROR, "Unspecified write error occurred"); + is_done = true; + break; + } + } + } else { + // No connection available on the current host, move to the next host. + request_execution->next_host(); } - request_execution->next_host(); } - if (!is_successful) { + if (!request_execution->current_host()) { set_error(CASS_ERROR_LIB_NO_HOSTS_AVAILABLE, "All hosts in current policy attempted " "and were either unavailable or failed"); } @@ -335,7 +393,7 @@ void RequestExecution::on_execute_next(Timer* timer) { request_handler_->execute void RequestExecution::on_retry_current_host() { retry_current_host(); } void RequestExecution::on_retry_next_host() { - current_host_->decrement_inflight_requests(); + if (current_host_) current_host_->decrement_inflight_requests(); retry_next_host(); } @@ -392,14 +450,8 @@ void RequestExecution::on_set(ResponseMessage* response) { } void RequestExecution::on_error(CassError code, const String& message) { - current_host_->decrement_inflight_requests(); - - // Handle recoverable errors by retrying with the next host - if (code == CASS_ERROR_LIB_WRITE_ERROR || code == CASS_ERROR_LIB_UNABLE_TO_SET_KEYSPACE) { - retry_next_host(); - } else { - set_error(code, message); - } + if (current_host_) current_host_->decrement_inflight_requests(); + set_error(code, message); } void RequestExecution::notify_result_metadata_changed(const Request* request, @@ -431,6 +483,17 @@ void RequestExecution::notify_result_metadata_changed(const Request* request, } } +void RequestExecution::notify_prepared_id_mismatch(const String& expected_id, + const String& received_id) { + OStringStream ss; + ss << "ID mismatch while trying to prepare query (expected ID " << to_hex(expected_id) + << ", received ID " << to_hex(received_id) + << "). This prepared statement won't work anymore. This usually happens when you run a " + "'USE...' 
query after the statement was prepared."; + String message = ss.str(); + request_handler_->set_error(CASS_ERROR_LIB_UNEXPECTED_RESPONSE, message); +} + void RequestExecution::on_result_response(Connection* connection, ResponseMessage* response) { ResultResponse* result = static_cast(response->response_body().get()); @@ -572,14 +635,17 @@ void RequestExecution::on_error_response(Connection* connection, ResponseMessage } void RequestExecution::on_error_unprepared(Connection* connection, ErrorResponse* error) { - String query; + LOG_DEBUG("Unprepared error response returned for request: %s", + error->message().to_string().c_str()); + String query; + String id = error->prepared_id().to_string(); if (request()->opcode() == CQL_OPCODE_EXECUTE) { const ExecuteRequest* execute = static_cast(request()); query = execute->prepared()->query(); } else if (request()->opcode() == CQL_OPCODE_BATCH) { const BatchRequest* batch = static_cast(request()); - if (!batch->find_prepared_query(error->prepared_id().to_string(), &query)) { + if (!batch->find_prepared_query(id, &query)) { set_error(CASS_ERROR_LIB_UNEXPECTED_RESPONSE, "Unable to find prepared statement in batch statement"); return; @@ -591,7 +657,8 @@ void RequestExecution::on_error_unprepared(Connection* connection, ErrorResponse return; } - if (!connection->write_and_flush(RequestCallback::Ptr(new PrepareCallback(query, this)))) { + RequestCallback::Ptr callback(new PrepareCallback(query, id, this)); + if (connection->write_and_flush(callback) < 0) { // Try to prepare on the same host but on a different connection retry_current_host(); } diff --git a/src/request_handler.hpp b/src/request_handler.hpp index 9f6f0e3ae..ae257d9ac 100644 --- a/src/request_handler.hpp +++ b/src/request_handler.hpp @@ -275,6 +275,7 @@ class RequestExecution : public RequestCallback { void next_host() { current_host_ = request_handler_->next_host(RequestHandler::Protected()); } void notify_result_metadata_changed(const Request* request, ResultResponse* result_response); + void notify_prepared_id_mismatch(const String& expected_id, const String& received_id); virtual void on_retry_current_host(); virtual void on_retry_next_host(); diff --git a/src/request_processor.cpp b/src/request_processor.cpp index 709b0d624..e0d8d326d 100644 --- a/src/request_processor.cpp +++ b/src/request_processor.cpp @@ -145,7 +145,8 @@ RequestProcessorSettings::RequestProcessorSettings() , new_request_ratio(CASS_DEFAULT_NEW_REQUEST_RATIO) , max_tracing_wait_time_ms(CASS_DEFAULT_MAX_TRACING_DATA_WAIT_TIME_MS) , retry_tracing_wait_time_ms(CASS_DEFAULT_RETRY_TRACING_DATA_WAIT_TIME_MS) - , tracing_consistency(CASS_DEFAULT_TRACING_CONSISTENCY) { + , tracing_consistency(CASS_DEFAULT_TRACING_CONSISTENCY) + , address_factory(new DefaultAddressFactory()) { profiles.set_empty_key(""); } @@ -161,13 +162,15 @@ RequestProcessorSettings::RequestProcessorSettings(const Config& config) , new_request_ratio(config.new_request_ratio()) , max_tracing_wait_time_ms(config.max_tracing_wait_time_ms()) , retry_tracing_wait_time_ms(config.retry_tracing_wait_time_ms()) - , tracing_consistency(config.tracing_consistency()) {} + , tracing_consistency(config.tracing_consistency()) + , address_factory(create_address_factory_from_config(config)) {} RequestProcessor::RequestProcessor(RequestProcessorListener* listener, EventLoop* event_loop, const ConnectionPoolManager::Ptr& connection_pool_manager, const Host::Ptr& connected_host, const HostMap& hosts, const TokenMap::Ptr& token_map, - const RequestProcessorSettings& settings, 
Random* random) + const RequestProcessorSettings& settings, Random* random, + const String& local_dc) : connection_pool_manager_(connection_pool_manager) , listener_(listener ? listener : &nop_request_processor_listener__) , event_loop_(event_loop) @@ -210,7 +213,7 @@ RequestProcessor::RequestProcessor(RequestProcessorListener* listener, EventLoop LoadBalancingPolicy::Vec policies = load_balancing_policies(); for (LoadBalancingPolicy::Vec::const_iterator it = policies.begin(); it != policies.end(); ++it) { // Initialize the load balancing policies - (*it)->init(connected_host, hosts, random); + (*it)->init(connected_host, hosts, random, local_dc); } listener_->on_connect(this); @@ -337,8 +340,9 @@ bool RequestProcessor::on_wait_for_tracing_data(const RequestHandler::Ptr& reque bool RequestProcessor::on_wait_for_schema_agreement(const RequestHandler::Ptr& request_handler, const Host::Ptr& current_host, const Response::Ptr& response) { - SchemaAgreementHandler::Ptr handler(new SchemaAgreementHandler( - request_handler, current_host, response, this, settings_.max_schema_wait_time_ms)); + SchemaAgreementHandler::Ptr handler( + new SchemaAgreementHandler(request_handler, current_host, response, this, + settings_.max_schema_wait_time_ms, settings_.address_factory)); return write_wait_callback(request_handler, current_host, handler->callback()); } @@ -579,7 +583,7 @@ bool RequestProcessor::write_wait_callback(const RequestHandler::Ptr& request_ha const RequestCallback::Ptr& callback) { PooledConnection::Ptr connection( connection_pool_manager_->find_least_busy(current_host->address())); - if (connection && connection->write(callback.get())) { + if (connection && connection->write(callback.get()) > 0) { // Stop the original request timer now that we have a response and // are waiting for the maximum wait time of the handler. request_handler->stop_timer(); diff --git a/src/request_processor.hpp b/src/request_processor.hpp index 56bb45673..67b0bf47c 100644 --- a/src/request_processor.hpp +++ b/src/request_processor.hpp @@ -134,6 +134,8 @@ struct RequestProcessorSettings { uint64_t retry_tracing_wait_time_ms; CassConsistency tracing_consistency; + + AddressFactory::Ptr address_factory; }; /** @@ -163,12 +165,13 @@ class RequestProcessor * @param token_map The current token map. * @param settings The current settings for the request processor. * @param random A RNG for randomizing hosts in the load balancing policies. + * @param local_dc The local datacenter for initializing the load balancing policies. */ RequestProcessor(RequestProcessorListener* listener, EventLoop* event_loop, const ConnectionPoolManager::Ptr& connection_pool_manager, const Host::Ptr& connected_host, const HostMap& hosts, const TokenMap::Ptr& token_map, const RequestProcessorSettings& settings, - Random* random); + Random* random, const String& local_dc); /** * Close/Terminate the request request processor (thread-safe). 
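The load balancing API change is the thread that ties much of this diff together: every policy's init() now receives the locally determined data center, and RequestProcessor forwards it to each policy in its chain. Below is a minimal sketch of a chained policy adopting the new signature, modeled on HostTargetingPolicy::init() earlier in this diff; the class name and the logging body are illustrative only, while the four-argument init() and the pass-through to ChainedLoadBalancingPolicy::init() are what the change actually requires.

class ExampleDcLoggingPolicy : public ChainedLoadBalancingPolicy {
public:
  ExampleDcLoggingPolicy(LoadBalancingPolicy* child_policy)
      : ChainedLoadBalancingPolicy(child_policy) {}

  virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random,
                    const String& local_dc) {
    if (!local_dc.empty()) {
      LOG_INFO("Initializing policy with local data center '%s'", local_dc.c_str());
    }
    // Always forward local_dc so DC-aware policies further down the chain can fall
    // back to it when no data center was configured explicitly (see
    // DCAwarePolicy::init() at the top of this section).
    ChainedLoadBalancingPolicy::init(connected_host, hosts, random, local_dc);
  }

  // distance()/new_query_plan() overrides omitted; they are unchanged by this diff.
};

Policies that do not use the value (RoundRobinPolicy, ListPolicy) simply accept and forward it, which is why the diff has to touch every policy header even where the behavior is unchanged.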
diff --git a/src/request_processor_initializer.cpp b/src/request_processor_initializer.cpp index fb3b2d693..9d2435644 100644 --- a/src/request_processor_initializer.cpp +++ b/src/request_processor_initializer.cpp @@ -38,11 +38,9 @@ class RunInitializeProcessor : public Task { }}} // namespace datastax::internal::core -RequestProcessorInitializer::RequestProcessorInitializer(const Host::Ptr& connected_host, - ProtocolVersion protocol_version, - const HostMap& hosts, - const TokenMap::Ptr& token_map, - const Callback& callback) +RequestProcessorInitializer::RequestProcessorInitializer( + const Host::Ptr& connected_host, ProtocolVersion protocol_version, const HostMap& hosts, + const TokenMap::Ptr& token_map, const String& local_dc, const Callback& callback) : event_loop_(NULL) , listener_(NULL) , metrics_(NULL) @@ -51,6 +49,7 @@ RequestProcessorInitializer::RequestProcessorInitializer(const Host::Ptr& connec , protocol_version_(protocol_version) , hosts_(hosts) , token_map_(token_map) + , local_dc_(local_dc) , error_code_(REQUEST_PROCESSOR_OK) , callback_(callback) { uv_mutex_init(&mutex_); @@ -158,7 +157,8 @@ void RequestProcessorInitializer::on_initialize(ConnectionPoolManagerInitializer error_message_ = "Unable to connect to any hosts"; } else { processor_.reset(new RequestProcessor(listener_, event_loop_, initializer->release_manager(), - connected_host_, hosts_, token_map_, settings_, random_)); + connected_host_, hosts_, token_map_, settings_, random_, + local_dc_)); int rc = processor_->init(RequestProcessor::Protected()); if (rc != 0) { diff --git a/src/request_processor_initializer.hpp b/src/request_processor_initializer.hpp index 1efea75de..0914dcd7b 100644 --- a/src/request_processor_initializer.hpp +++ b/src/request_processor_initializer.hpp @@ -59,12 +59,13 @@ class RequestProcessorInitializer * @param protocol_version The highest negotiated protocol for the cluster. * @param hosts A mapping of available hosts in the cluster. * @param token_map A token map. + * @param local_dc The local datacenter for initializing the load balancing policies. * @param callback A callback that is called when the processor is initialized * or if an error occurred. 
*/ RequestProcessorInitializer(const Host::Ptr& connected_host, ProtocolVersion protocol_version, const HostMap& hosts, const TokenMap::Ptr& token_map, - const Callback& callback); + const String& local_dc, const Callback& callback); ~RequestProcessorInitializer(); /** @@ -165,6 +166,7 @@ class RequestProcessorInitializer const ProtocolVersion protocol_version_; HostMap hosts_; const TokenMap::Ptr token_map_; + String local_dc_; RequestProcessorError error_code_; String error_message_; diff --git a/src/resolver.hpp b/src/resolver.hpp index a284b9ce4..9b48a44a8 100644 --- a/src/resolver.hpp +++ b/src/resolver.hpp @@ -55,7 +55,7 @@ class Resolver : public RefCounted { req_.data = this; } - ~Resolver() {} + uv_loop_t* loop() { return req_.loop; } const String& hostname() { return hostname_; } int port() { return port_; } @@ -130,8 +130,8 @@ class Resolver : public RefCounted { bool init_addresses(struct addrinfo* res) { bool status = false; do { - Address address; - if (address.init(res->ai_addr)) { + Address address(res->ai_addr); + if (address.is_valid_and_resolved()) { addresses_.push_back(address); status = true; } diff --git a/src/round_robin_policy.cpp b/src/round_robin_policy.cpp index 53c102efc..dd7f2ecff 100644 --- a/src/round_robin_policy.cpp +++ b/src/round_robin_policy.cpp @@ -32,7 +32,8 @@ RoundRobinPolicy::RoundRobinPolicy() RoundRobinPolicy::~RoundRobinPolicy() { uv_rwlock_destroy(&available_rwlock_); } -void RoundRobinPolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random) { +void RoundRobinPolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc) { available_.resize(hosts.size()); std::transform(hosts.begin(), hosts.end(), std::inserter(available_, available_.begin()), GetAddress()); diff --git a/src/round_robin_policy.hpp b/src/round_robin_policy.hpp index e09316813..f5b4f715d 100644 --- a/src/round_robin_policy.hpp +++ b/src/round_robin_policy.hpp @@ -30,7 +30,8 @@ class RoundRobinPolicy : public LoadBalancingPolicy { RoundRobinPolicy(); ~RoundRobinPolicy(); - virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random); + virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc); virtual CassHostDistance distance(const Host::Ptr& host) const; diff --git a/src/row.cpp b/src/row.cpp index b58f823a0..9dec4fe40 100644 --- a/src/row.cpp +++ b/src/row.cpp @@ -80,3 +80,13 @@ bool Row::get_string_by_name(const StringRef& name, String* out) const { *out = value->decoder().as_string(); return true; } + +bool Row::get_uuid_by_name(const StringRef& name, CassUuid* out) const { + const Value* value = get_by_name(name); + if (value == NULL || value->is_null() || value->value_type() != CASS_VALUE_TYPE_UUID || + value->value_type() == CASS_VALUE_TYPE_TIMEUUID) { + return false; + } + *out = value->as_uuid(); + return true; +} diff --git a/src/row.hpp b/src/row.hpp index 0d3072e6b..ff4318db5 100644 --- a/src/row.hpp +++ b/src/row.hpp @@ -40,6 +40,8 @@ class Row { bool get_string_by_name(const StringRef& name, String* out) const; + bool get_uuid_by_name(const StringRef& name, CassUuid* out) const; + const ResultResponse* result() const { return result_; } void set_result(ResultResponse* result) { result_ = result; } diff --git a/src/schema_agreement_handler.cpp b/src/schema_agreement_handler.cpp index 2a22dd219..2699f78a5 100644 --- a/src/schema_agreement_handler.cpp +++ b/src/schema_agreement_handler.cpp @@ -20,7 +20,7 @@ 
#define RETRY_SCHEMA_AGREEMENT_WAIT_MS 200 #define SELECT_LOCAL_SCHEMA "SELECT schema_version FROM system.local WHERE key='local'" -#define SELECT_PEERS_SCHEMA "SELECT peer, rpc_address, schema_version FROM system.peers" +#define SELECT_PEERS_SCHEMA "SELECT peer, rpc_address, host_id, schema_version FROM system.peers" using namespace datastax; using namespace datastax::internal::core; @@ -29,10 +29,12 @@ SchemaAgreementHandler::SchemaAgreementHandler(const RequestHandler::Ptr& reques const Host::Ptr& current_host, const Response::Ptr& response, SchemaAgreementListener* listener, - uint64_t max_wait_time_ms) + uint64_t max_wait_time_ms, + const AddressFactory::Ptr& address_factory) : WaitForHandler(request_handler, current_host, response, max_wait_time_ms, RETRY_SCHEMA_AGREEMENT_WAIT_MS) - , listener_(listener) {} + , listener_(listener) + , address_factory_(address_factory) {} ChainedRequestCallback::Ptr SchemaAgreementHandler::callback() { WaitforRequestVec requests; @@ -53,7 +55,7 @@ bool SchemaAgreementHandler::on_set(const ChainedRequestCallback::Ptr& callback) current_version = v->to_string_ref(); } } else { - LOG_DEBUG("No row found in %s's local system table", address_string().c_str()); + LOG_DEBUG("No row found in %s's local system table", host()->address_string().c_str()); } ResultResponse::Ptr peers_result(callback->result("peers")); @@ -63,8 +65,7 @@ bool SchemaAgreementHandler::on_set(const ChainedRequestCallback::Ptr& callback) const Row* row = rows.row(); Address address; - bool is_valid_address = determine_address_for_peer_host( - this->address(), row->get_by_name("peer"), row->get_by_name("rpc_address"), &address); + bool is_valid_address = address_factory_->create(row, this->host(), &address); if (is_valid_address && listener_->on_is_host_up(address)) { const Value* v = row->get_by_name("schema_version"); diff --git a/src/schema_agreement_handler.hpp b/src/schema_agreement_handler.hpp index de59a18f3..e9b37355b 100644 --- a/src/schema_agreement_handler.hpp +++ b/src/schema_agreement_handler.hpp @@ -17,6 +17,7 @@ #ifndef DATASTAX_INTERNAL_SCHEMA_CHANGE_HANDLER_HPP #define DATASTAX_INTERNAL_SCHEMA_CHANGE_HANDLER_HPP +#include "address_factory.hpp" #include "wait_for_handler.hpp" #include @@ -57,10 +58,11 @@ class SchemaAgreementHandler : public WaitForHandler { * @param listener A listener for determining host liveness. * @param max_wait_time_ms The maximum amount of time to wait for the data to * become available. + * @param address_factory Address factory for determining peer addresses. */ SchemaAgreementHandler(const RequestHandler::Ptr& request_handler, const Host::Ptr& current_host, const Response::Ptr& response, SchemaAgreementListener* listener, - uint64_t max_wait_time_ms); + uint64_t max_wait_time_ms, const AddressFactory::Ptr& address_factory); /** * Gets a request callback for executing queries on behalf of the handler. 
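SchemaAgreementHandler no longer decides between peer and rpc_address itself; it delegates to the AddressFactory supplied through RequestProcessorSettings, and the peers query now also selects host_id so SNI-based deployments can address nodes by host ID. The following is only a hedged sketch of a factory matching the call site above: the create() parameter types are inferred from that call, and the fallback rules are assumptions, not the driver's DefaultAddressFactory.

class ExampleAddressFactory : public AddressFactory {
public:
  // Signature inferred from: address_factory_->create(row, this->host(), &address)
  virtual bool create(const Row* peer_row, const Host::Ptr& connected_host, Address* output) {
    const Value* v = peer_row->get_by_name("rpc_address");
    if (v == NULL || v->is_null()) {
      return false; // Mirrors the "No rpc_address" warning in Host::set() above.
    }
    if (!v->decoder().as_inet(v->size(), connected_host->address().port(), output)) {
      return false;
    }
    // A 'bind any' rpc_address cannot be contacted directly; Host::set() falls back to
    // the listen address in that case, while this sketch simply rejects the row.
    if (Address("0.0.0.0", 0).equals(*output, false) ||
        Address("::", 0).equals(*output, false)) {
      return false;
    }
    return true;
  }
};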
@@ -75,6 +77,7 @@ class SchemaAgreementHandler : public WaitForHandler { private: SchemaAgreementListener* const listener_; + AddressFactory::Ptr address_factory_; }; }}} // namespace datastax::internal::core diff --git a/src/serialization.hpp b/src/serialization.hpp index 3633bbb02..70f0a8037 100644 --- a/src/serialization.hpp +++ b/src/serialization.hpp @@ -66,8 +66,8 @@ inline char* encode_uint16(char* output, uint16_t value) { } inline const char* decode_uint16(const char* input, uint16_t& output) { - output = (static_cast(static_cast(input[1])) << 0) | - (static_cast(static_cast(input[0])) << 8); + output = static_cast((static_cast(static_cast(input[1])) << 0) | + (static_cast(static_cast(input[0])) << 8)); return input + sizeof(uint16_t); } @@ -78,8 +78,8 @@ inline char* encode_int16(char* output, int16_t value) { } inline const char* decode_int16(const char* input, int16_t& output) { - output = (static_cast(static_cast(input[1])) << 0) | - (static_cast(static_cast(input[0])) << 8); + output = static_cast((static_cast(static_cast(input[1])) << 0) | + (static_cast(static_cast(input[0])) << 8)); return input + sizeof(int16_t); } diff --git a/src/session.cpp b/src/session.cpp index 754752bdf..84949655a 100644 --- a/src/session.cpp +++ b/src/session.cpp @@ -193,14 +193,14 @@ class SessionInitializer : public RefCounted { SessionInitializer() { uv_mutex_destroy(&mutex_); } void initialize(const Host::Ptr& connected_host, ProtocolVersion protocol_version, - const HostMap& hosts, const TokenMap::Ptr& token_map) { + const HostMap& hosts, const TokenMap::Ptr& token_map, const String& local_dc) { inc_ref(); const size_t thread_count_io = remaining_ = session_->config().thread_count_io(); for (size_t i = 0; i < thread_count_io; ++i) { - RequestProcessorInitializer::Ptr initializer( - new RequestProcessorInitializer(connected_host, protocol_version, hosts, token_map, - bind_callback(&SessionInitializer::on_initialize, this))); + RequestProcessorInitializer::Ptr initializer(new RequestProcessorInitializer( + connected_host, protocol_version, hosts, token_map, local_dc, + bind_callback(&SessionInitializer::on_initialize, this))); RequestProcessorSettings settings(session_->config()); settings.connection_pool_settings.connection_settings.client_id = @@ -357,7 +357,8 @@ void Session::join() { } void Session::on_connect(const Host::Ptr& connected_host, ProtocolVersion protocol_version, - const HostMap& hosts, const TokenMap::Ptr& token_map) { + const HostMap& hosts, const TokenMap::Ptr& token_map, + const String& local_dc) { int rc = 0; if (hosts.empty()) { @@ -391,7 +392,7 @@ void Session::on_connect(const Host::Ptr& connected_host, ProtocolVersion protoc request_processor_count_ = 0; is_closing_ = false; SessionInitializer::Ptr initializer(new SessionInitializer(this)); - initializer->initialize(connected_host, protocol_version, hosts, token_map); + initializer->initialize(connected_host, protocol_version, hosts, token_map, local_dc); } void Session::on_close() { diff --git a/src/session.hpp b/src/session.hpp index 0d8b0f0ff..c6cde6f5a 100644 --- a/src/session.hpp +++ b/src/session.hpp @@ -53,7 +53,8 @@ class Session // Session base methods virtual void on_connect(const Host::Ptr& connected_host, ProtocolVersion protocol_version, - const HostMap& hosts, const TokenMap::Ptr& token_map); + const HostMap& hosts, const TokenMap::Ptr& token_map, + const String& local_dc); virtual void on_close(); diff --git a/src/session_base.cpp b/src/session_base.cpp index e37140eb5..6ccb4f7c5 100644 --- 
a/src/session_base.cpp +++ b/src/session_base.cpp @@ -159,7 +159,8 @@ void SessionBase::notify_closed() { } void SessionBase::on_connect(const Host::Ptr& connected_host, ProtocolVersion protocol_version, - const HostMap& hosts, const TokenMap::Ptr& token_map) { + const HostMap& hosts, const TokenMap::Ptr& token_map, + const String& local_dc) { notify_connected(); } @@ -185,8 +186,21 @@ void SessionBase::on_close(Cluster* cluster) { void SessionBase::on_initialize(ClusterConnector* connector) { if (connector->is_ok()) { cluster_ = connector->release_cluster(); + + // Handle default consistency level for DBaaS + StringMultimap::const_iterator it = cluster_->supported_options().find("PRODUCT_TYPE"); + if (it != cluster_->supported_options().end() && it->second[0] == CASS_DBAAS_PRODUCT_TYPE) { + config_.set_default_consistency(CASS_DEFAULT_DBAAS_CONSISTENCY); + + if (it->second.size() > 1) { + LOG_DEBUG("PRODUCT_TYPE has more than one type: %s", implode(it->second).c_str()); + } + } else { + config_.set_default_consistency(CASS_DEFAULT_CONSISTENCY); + } + on_connect(cluster_->connected_host(), cluster_->protocol_version(), - cluster_->available_hosts(), cluster_->token_map()); + cluster_->available_hosts(), cluster_->token_map(), cluster_->local_dc()); } else { assert(!connector->is_canceled() && "Cluster connection process canceled"); switch (connector->error_code()) { diff --git a/src/session_base.hpp b/src/session_base.hpp index ec3805c24..1c3e6c68f 100644 --- a/src/session_base.hpp +++ b/src/session_base.hpp @@ -112,9 +112,12 @@ class SessionBase : public ClusterListener { * connection. * @param hosts The current hosts in the cluster. * @param token_map The token map for the cluster. + * @param local_dc The local datacenter for the cluster determined by the metadata service for + * initializing the load balancing policies. */ virtual void on_connect(const Host::Ptr& connected_host, ProtocolVersion protocol_version, - const HostMap& hosts, const TokenMap::Ptr& token_map); + const HostMap& hosts, const TokenMap::Ptr& token_map, + const String& local_dc); /** * A callback called after the control connection fails to connect. 
By default diff --git a/src/socket.cpp b/src/socket.cpp index fe4f54eaa..76b176514 100644 --- a/src/socket.cpp +++ b/src/socket.cpp @@ -213,8 +213,13 @@ void SslSocketHandler::on_read(Socket* socket, ssize_t nread, const uv_buf_t* bu on_ssl_read(socket, decrypted, rc); } if (rc <= 0 && ssl_session_->has_error()) { - LOG_ERROR("Unable to decrypt data: %s", ssl_session_->error_message().c_str()); - socket->defunct(); + if (ssl_session_->error_code() == CASS_ERROR_SSL_CLOSED) { + LOG_DEBUG("SSL session closed"); + socket->close(); + } else { + LOG_ERROR("Unable to decrypt data: %s", ssl_session_->error_message().c_str()); + socket->defunct(); + } } } @@ -229,7 +234,7 @@ void SocketWriteBase::on_close() { int32_t SocketWriteBase::write(SocketRequest* request) { size_t last_buffer_size = buffers_.size(); int32_t request_size = request->encode(&buffers_); - if (request_size < 0) { + if (request_size <= 0) { buffers_.resize(last_buffer_size); // Rollback return request_size; } @@ -275,8 +280,7 @@ void SocketWriteBase::handle_write(uv_write_t* req, int status) { Socket::Socket(const Address& address, size_t max_reusable_write_objects) : is_defunct_(false) , max_reusable_write_objects_(max_reusable_write_objects) - , address_(address) - , address_string_(address.to_string()) { + , address_(address) { tcp_.data = this; } @@ -329,7 +333,7 @@ size_t Socket::flush() { } bool Socket::is_closing() const { - return uv_is_closing(reinterpret_cast(&tcp_)); + return uv_is_closing(reinterpret_cast(&tcp_)) != 0; } void Socket::close() { @@ -370,7 +374,7 @@ void Socket::on_close(uv_handle_t* handle) { } void Socket::handle_close() { - LOG_DEBUG("Socket(%p) to host %s closed", static_cast(this), address_string().c_str()); + LOG_DEBUG("Socket(%p) to host %s closed", static_cast(this), address_.to_string().c_str()); while (!pending_writes_.is_empty()) { SocketWriteBase* pending_write = pending_writes_.pop_front(); diff --git a/src/socket.hpp b/src/socket.hpp index 3b845a4c3..b54e56c26 100644 --- a/src/socket.hpp +++ b/src/socket.hpp @@ -350,7 +350,6 @@ class Socket : public RefCounted { uv_loop_t* loop() { return tcp_.loop; } const Address& address() const { return address_; } - const String& address_string() const { return address_string_; } private: static void alloc_buffer(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf); @@ -376,7 +375,6 @@ class Socket : public RefCounted { size_t max_reusable_write_objects_; Address address_; - String address_string_; }; }}} // namespace datastax::internal::core diff --git a/src/socket_connector.cpp b/src/socket_connector.cpp index 9e44f6ad4..4ad4835b2 100644 --- a/src/socket_connector.cpp +++ b/src/socket_connector.cpp @@ -22,10 +22,25 @@ #define SSL_HANDSHAKE_MAX_BUFFER_SIZE (16 * 1024 + 5) using namespace datastax; +using namespace datastax::internal; using namespace datastax::internal::core; namespace datastax { namespace internal { namespace core { +namespace { + +// Used for debugging resolved addresses. +String to_string(const AddressVec& addresses) { + String result; + for (AddressVec::const_iterator it = addresses.begin(), end = addresses.end(); it != end; ++it) { + if (!result.empty()) result.append(", "); + result.append(it->to_string()); + } + return result; +} + +} // namespace + /** * A socket handler that handles the SSL handshake process. 
*/ @@ -85,6 +100,8 @@ SocketSettings::SocketSettings(const Config& config) , max_reusable_write_objects(config.max_reusable_write_objects()) , local_address(config.local_address()) {} +Atomic SocketConnector::resolved_address_offset_(0); + SocketConnector::SocketConnector(const Address& address, const Callback& callback) : address_(address) , callback_(callback) @@ -99,23 +116,34 @@ SocketConnector* SocketConnector::with_settings(const SocketSettings& settings) void SocketConnector::connect(uv_loop_t* loop) { inc_ref(); // For the event loop - if (settings_.hostname_resolution_enabled) { - // Run hostname resolution then connect. - resolver_.reset(new NameResolver(address_, bind_callback(&SocketConnector::on_resolve, this))); + if (!address_.is_resolved()) { // Address not resolved + hostname_ = address_.hostname_or_address(); + + resolver_.reset(new Resolver(hostname_, address_.port(), + bind_callback(&SocketConnector::on_resolve, this))); resolver_->resolve(loop, settings_.resolve_timeout_ms); } else { - // Postpone the connection process until after this method ends because it - // can call the callback (via on_error() when when the socket fails to - // init/bind) and destroy its parent. - no_resolve_timer_.start(loop, - 0, // Run connect immediately after. - bind_callback(&SocketConnector::on_no_resolve, this)); + resolved_address_ = address_; + + if (settings_.hostname_resolution_enabled) { // Run hostname resolution then connect. + name_resolver_.reset( + new NameResolver(address_, bind_callback(&SocketConnector::on_name_resolve, this))); + name_resolver_->resolve(loop, settings_.resolve_timeout_ms); + } else { + // Postpone the connection process until after this method ends because it + // can call the callback (via on_error() when when the socket fails to + // init/bind) and destroy its parent. + no_resolve_timer_.start(loop, + 0, // Run connect immediately after. + bind_callback(&SocketConnector::on_no_resolve, this)); + } } } void SocketConnector::cancel() { error_code_ = SOCKET_CANCELED; if (resolver_) resolver_->cancel(); + if (name_resolver_) name_resolver_->cancel(); if (connector_) connector_->cancel(); if (socket_) socket_->close(); } @@ -127,7 +155,7 @@ Socket::Ptr SocketConnector::release_socket() { } void SocketConnector::internal_connect(uv_loop_t* loop) { - Socket::Ptr socket(new Socket(address_, settings_.max_reusable_write_objects)); + Socket::Ptr socket(new Socket(resolved_address_, settings_.max_reusable_write_objects)); if (uv_tcp_init(loop, socket->handle()) != 0) { on_error(SOCKET_ERROR_INIT, "Unable to initialize TCP object"); @@ -140,7 +168,8 @@ void SocketConnector::internal_connect(uv_loop_t* loop) { // This needs to be done after setting the socket to properly cleanup. 
const Address& local_address = settings_.local_address; if (local_address.is_valid()) { - int rc = uv_tcp_bind(socket->handle(), local_address.addr(), 0); + Address::SocketStorage storage; + int rc = uv_tcp_bind(socket->handle(), local_address.to_sockaddr(&storage), 0); if (rc != 0) { on_error(SOCKET_ERROR_BIND, "Unable to bind local address: " + String(uv_strerror(rc))); @@ -158,10 +187,11 @@ void SocketConnector::internal_connect(uv_loop_t* loop) { } if (settings_.ssl_context) { - ssl_session_.reset(settings_.ssl_context->create_session(address_, hostname_)); + ssl_session_.reset(settings_.ssl_context->create_session(resolved_address_, hostname_, + address_.server_name())); } - connector_.reset(new TcpConnector(address_)); + connector_.reset(new TcpConnector(resolved_address_)); connector_->connect(socket_->handle(), bind_callback(&SocketConnector::on_connect, this)); } @@ -182,10 +212,8 @@ void SocketConnector::ssl_handshake() { size_t size = ssl_session_->outgoing().read(buf, SSL_HANDSHAKE_MAX_BUFFER_SIZE); if (size > 0) { socket_->write_and_flush(new BufferSocketRequest(Buffer(buf, size))); - } - - // If the handshake process is done then verify the certificate and finish. - if (ssl_session_->is_handshake_done()) { + } else if (ssl_session_->is_handshake_done()) { // If the handshake process is done then verify + // the certificate and finish. ssl_session_->verify(); if (ssl_session_->has_error()) { on_error(SOCKET_ERROR_SSL_VERIFY, @@ -253,7 +281,26 @@ void SocketConnector::on_connect(TcpConnector* tcp_connector) { } } -void SocketConnector::on_resolve(NameResolver* resolver) { +void SocketConnector::on_resolve(Resolver* resolver) { + if (resolver->is_success()) { + const AddressVec& addresses(resolver->addresses()); + LOG_DEBUG("Resolved the addresses %s for hostname %s", to_string(addresses).c_str(), + hostname_.c_str()); + resolved_address_ = Address( + addresses[resolved_address_offset_.fetch_add(MEMORY_ORDER_RELAXED) % addresses.size()], + address_.server_name()); // Keep the server name for debugging + internal_connect(resolver->loop()); + } else if (is_canceled() || resolver->is_canceled()) { + finish(); + } else if (resolver->is_timed_out()) { + on_error(SOCKET_ERROR_RESOLVE_TIMEOUT, "Timed out attempting to resolve hostname"); + } else { + on_error(SOCKET_ERROR_RESOLVE, + "Unable to resolve hostname '" + String(uv_strerror(resolver->uv_status())) + "'"); + } +} + +void SocketConnector::on_name_resolve(NameResolver* resolver) { if (resolver->is_success()) { LOG_DEBUG("Resolved the hostname %s for address %s", resolver->hostname().c_str(), resolver->address().to_string().c_str()); diff --git a/src/socket_connector.hpp b/src/socket_connector.hpp index 9349ddf04..c7a1d33eb 100644 --- a/src/socket_connector.hpp +++ b/src/socket_connector.hpp @@ -17,8 +17,10 @@ #ifndef DATASTAX_INTERNAL_SOCKET_CONNECTOR_HPP #define DATASTAX_INTERNAL_SOCKET_CONNECTOR_HPP +#include "atomic.hpp" #include "callback.hpp" #include "name_resolver.hpp" +#include "resolver.hpp" #include "socket.hpp" #include "tcp_connector.hpp" @@ -117,7 +119,6 @@ class SocketConnector : public RefCounted { Socket::Ptr release_socket(); public: - const Address& address() { return address_; } const String& hostname() { return hostname_; } ScopedPtr& ssl_session() { return ssl_session_; } @@ -141,17 +142,23 @@ class SocketConnector : public RefCounted { void on_error(SocketError code, const String& message); void on_connect(TcpConnector* tcp_connecter); - void on_resolve(NameResolver* resolver); + void on_resolve(Resolver* 
resolver); + void on_name_resolve(NameResolver* resolver); void on_no_resolve(Timer* timer); +private: + static Atomic resolved_address_offset_; + private: Address address_; + Address resolved_address_; String hostname_; Callback callback_; Socket::Ptr socket_; TcpConnector::Ptr connector_; - NameResolver::Ptr resolver_; + Resolver::Ptr resolver_; + NameResolver::Ptr name_resolver_; Timer no_resolve_timer_; SocketError error_code_; diff --git a/src/ssl.hpp b/src/ssl.hpp index 2b6753c47..495aa7357 100644 --- a/src/ssl.hpp +++ b/src/ssl.hpp @@ -32,9 +32,11 @@ namespace datastax { namespace internal { namespace core { class SslSession : public Allocated { public: - SslSession(const Address& address, const String& hostname, int flags) + SslSession(const Address& address, const String& hostname, const String& sni_server_name, + int flags) : address_(address) , hostname_(hostname) + , sni_server_name_(sni_server_name) , verify_flags_(flags) , error_code_(CASS_OK) {} @@ -59,6 +61,7 @@ class SslSession : public Allocated { protected: Address address_; String hostname_; + String sni_server_name_; int verify_flags_; rb::RingBuffer incoming_; rb::RingBuffer outgoing_; @@ -78,7 +81,8 @@ class SslContext : public RefCounted { void set_verify_flags(int flags) { verify_flags_ = flags; } bool is_cert_validation_enabled() { return verify_flags_ != CASS_SSL_VERIFY_NONE; } - virtual SslSession* create_session(const Address& address, const String& hostname) = 0; + virtual SslSession* create_session(const Address& address, const String& hostname, + const String& sni_server_name) = 0; virtual CassError add_trusted_cert(const char* cert, size_t cert_length) = 0; virtual CassError set_cert(const char* cert, size_t cert_length) = 0; virtual CassError set_private_key(const char* key, size_t key_length, const char* password, diff --git a/src/ssl/ssl_no_impl.cpp b/src/ssl/ssl_no_impl.cpp index 5d0637e84..ff539211d 100644 --- a/src/ssl/ssl_no_impl.cpp +++ b/src/ssl/ssl_no_impl.cpp @@ -19,14 +19,16 @@ using namespace datastax; using namespace datastax::internal::core; -NoSslSession::NoSslSession(const Address& address, const String& hostname) - : SslSession(address, hostname, CASS_SSL_VERIFY_NONE) { +NoSslSession::NoSslSession(const Address& address, const String& hostname, + const String& sni_server_name) + : SslSession(address, hostname, sni_server_name, CASS_SSL_VERIFY_NONE) { error_code_ = CASS_ERROR_LIB_NOT_IMPLEMENTED; error_message_ = "SSL support not built into driver"; } -SslSession* NoSslContext::create_session(const Address& address, const String& hostname) { - return new NoSslSession(address, hostname); +SslSession* NoSslContext::create_session(const Address& address, const String& hostname, + const String& sni_server_name) { + return new NoSslSession(address, hostname, sni_server_name); } CassError NoSslContext::add_trusted_cert(const char* cert, size_t cert_length) { diff --git a/src/ssl/ssl_no_impl.hpp b/src/ssl/ssl_no_impl.hpp index 9a0f6dc7a..09c9fd916 100644 --- a/src/ssl/ssl_no_impl.hpp +++ b/src/ssl/ssl_no_impl.hpp @@ -21,7 +21,7 @@ namespace datastax { namespace internal { namespace core { class NoSslSession : public SslSession { public: - NoSslSession(const Address& address, const String& hostname); + NoSslSession(const Address& address, const String& hostname, const String& sni_server_name); virtual bool is_handshake_done() const { return false; } virtual void do_handshake() {} diff --git a/src/ssl/ssl_openssl_impl.cpp b/src/ssl/ssl_openssl_impl.cpp index 17bb148b6..8d68b1a4e 100644 --- 
a/src/ssl/ssl_openssl_impl.cpp +++ b/src/ssl/ssl_openssl_impl.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -36,6 +37,22 @@ #define SSL_F_SSL_CTX_USE_CERTIFICATE_CHAIN_FILE SSL_F_USE_CERTIFICATE_CHAIN_FILE #endif +#if defined(OPENSSL_VERSION_NUMBER) && \ + !defined(LIBRESSL_VERSION_NUMBER) // Required as OPENSSL_VERSION_NUMBER for LibreSSL is defined + // as 2.0.0 +#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) +#define SSL_CLIENT_METHOD TLS_client_method +#else +#define SSL_CLIENT_METHOD SSLv23_client_method +#endif +#else +#if (LIBRESSL_VERSION_NUMBER >= 0x20302000L) +#define SSL_CLIENT_METHOD TLS_client_method +#else +#define SSL_CLIENT_METHOD SSLv23_client_method +#endif +#endif + using namespace datastax; using namespace datastax::internal; using namespace datastax::internal::core; @@ -251,7 +268,7 @@ class OpenSslVerifyIdentity { static Result match(X509* cert, const Address& address) { Result result = match_subject_alt_names_ipadd(cert, address); if (result == NO_SAN_PRESENT) { - result = match_common_name_ipaddr(cert, address.to_string()); + result = match_common_name_ipaddr(cert, address.hostname_or_address()); } return result; } @@ -303,7 +320,7 @@ class OpenSslVerifyIdentity { } int i = -1; - while ((i = X509_NAME_get_index_by_NID(name, NID_commonName, i)) > 0) { + while ((i = X509_NAME_get_index_by_NID(name, NID_commonName, i)) >= 0) { X509_NAME_ENTRY* name_entry = X509_NAME_get_entry(name, i); if (name_entry == NULL) { return INVALID_CERT; @@ -410,20 +427,23 @@ class OpenSslVerifyIdentity { } }; -OpenSslSession::OpenSslSession(const Address& address, const String& hostname, int flags, - SSL_CTX* ssl_ctx) - : SslSession(address, hostname, flags) +OpenSslSession::OpenSslSession(const Address& address, const String& hostname, + const String& sni_server_name, int flags, SSL_CTX* ssl_ctx) + : SslSession(address, hostname, sni_server_name, flags) , ssl_(SSL_new(ssl_ctx)) , incoming_state_(&incoming_) , outgoing_state_(&outgoing_) , incoming_bio_(rb::RingBufferBio::create(&incoming_state_)) , outgoing_bio_(rb::RingBufferBio::create(&outgoing_state_)) { SSL_set_bio(ssl_, incoming_bio_, outgoing_bio_); - SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_NONE, ssl_no_verify_callback); #if DEBUG_SSL SSL_CTX_set_info_callback(ssl_ctx, ssl_info_callback); #endif SSL_set_connect_state(ssl_); + + if (!sni_server_name_.empty()) { + SSL_set_tlsext_host_name(ssl_, const_cast(sni_server_name_.c_str())); + } } OpenSslSession::~OpenSslSession() { SSL_free(ssl_); } @@ -509,22 +529,26 @@ int OpenSslSession::decrypt(char* buf, size_t size) { void OpenSslSession::check_error(int rc) { int err = SSL_get_error(ssl_, rc); - if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_NONE) { + if (err == SSL_ERROR_ZERO_RETURN) { + error_code_ = CASS_ERROR_SSL_CLOSED; + } else if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_NONE) { error_code_ = CASS_ERROR_SSL_PROTOCOL_ERROR; error_message_ = ssl_error_string(); } } OpenSslContext::OpenSslContext() - : ssl_ctx_(SSL_CTX_new(SSLv23_client_method())) + : ssl_ctx_(SSL_CTX_new(SSL_CLIENT_METHOD())) , trusted_store_(X509_STORE_new()) { SSL_CTX_set_cert_store(ssl_ctx_, trusted_store_); + SSL_CTX_set_verify(ssl_ctx_, SSL_VERIFY_NONE, ssl_no_verify_callback); } OpenSslContext::~OpenSslContext() { SSL_CTX_free(ssl_ctx_); } -SslSession* OpenSslContext::create_session(const Address& address, const String& hostname) { - return new OpenSslSession(address, hostname, verify_flags_, ssl_ctx_); +SslSession* OpenSslContext::create_session(const Address& 
address, const String& hostname, + const String& sni_server_name) { + return new OpenSslSession(address, hostname, sni_server_name, verify_flags_, ssl_ctx_); } CassError OpenSslContext::add_trusted_cert(const char* cert, size_t cert_length) { diff --git a/src/ssl/ssl_openssl_impl.hpp b/src/ssl/ssl_openssl_impl.hpp index c52aeeee6..f347caaa8 100644 --- a/src/ssl/ssl_openssl_impl.hpp +++ b/src/ssl/ssl_openssl_impl.hpp @@ -27,7 +27,8 @@ namespace datastax { namespace internal { namespace core { class OpenSslSession : public SslSession { public: - OpenSslSession(const Address& address, const String& hostname, int flags, SSL_CTX* ssl_ctx); + OpenSslSession(const Address& address, const String& hostname, const String& sni_server_name, + int flags, SSL_CTX* ssl_ctx); ~OpenSslSession(); virtual bool is_handshake_done() const { return SSL_is_init_finished(ssl_) != 0; } @@ -54,7 +55,8 @@ class OpenSslContext : public SslContext { ~OpenSslContext(); - virtual SslSession* create_session(const Address& address, const String& hostname); + virtual SslSession* create_session(const Address& address, const String& hostname, + const String& sni_server_name); virtual CassError add_trusted_cert(const char* cert, size_t cert_length); virtual CassError set_cert(const char* cert, size_t cert_length); virtual CassError set_private_key(const char* key, size_t key_length, const char* password, diff --git a/src/statement.cpp b/src/statement.cpp index dc96884c1..ab947d09c 100644 --- a/src/statement.cpp +++ b/src/statement.cpp @@ -154,8 +154,8 @@ CassError cass_statement_set_host(CassStatement* statement, const char* host, in CassError cass_statement_set_host_n(CassStatement* statement, const char* host, size_t host_length, int port) { - Address address; - if (!Address::from_string(String(host, host_length), port, &address)) { + Address address(String(host, host_length), port); + if (!address.is_valid_and_resolved()) { return CASS_ERROR_LIB_BAD_PARAMS; } statement->set_host(address); @@ -163,8 +163,8 @@ CassError cass_statement_set_host_n(CassStatement* statement, const char* host, } CassError cass_statement_set_host_inet(CassStatement* statement, const CassInet* host, int port) { - Address address; - if (!Address::from_inet(host->address, host->address_length, port, &address)) { + Address address(host->address, host->address_length, port); + if (!address.is_valid_and_resolved()) { return CASS_ERROR_LIB_BAD_PARAMS; } statement->set_host(address); @@ -275,7 +275,7 @@ Statement::Statement(const Prepared* prepared) , page_size_(-1) { // [short bytes] (or [string]) const String& id = prepared->id(); - query_or_id_.encode_string(0, id.data(), id.size()); + query_or_id_.encode_string(0, id.data(), static_cast(id.size())); // Inherit settings and keyspace from the prepared statement set_settings(prepared->request_settings()); // If the keyspace wasn't explictly set then attempt to set it using the @@ -315,7 +315,7 @@ int32_t Statement::encode_batch(ProtocolVersion version, RequestCallback* callba { // [short] bufs->push_back(Buffer(sizeof(uint16_t))); Buffer& buf = bufs->back(); - buf.encode_uint16(0, elements().size()); + buf.encode_uint16(0, static_cast(elements().size())); length += sizeof(uint16_t); } @@ -408,7 +408,7 @@ int32_t Statement::encode_begin(ProtocolVersion version, uint16_t element_count, if (version >= CASS_PROTOCOL_VERSION_V5) { pos = buf.encode_int32(pos, flags); } else { - pos = buf.encode_byte(pos, flags); + pos = buf.encode_byte(pos, static_cast(flags)); } if (element_count > 0) { @@ -501,7 +501,7 @@ 
int32_t Statement::encode_end(ProtocolVersion version, RequestCallback* callback } if (with_keyspace) { - pos = buf.encode_string(pos, keyspace().data(), keyspace().size()); + pos = buf.encode_string(pos, keyspace().data(), static_cast(keyspace().size())); } } @@ -542,7 +542,7 @@ bool Statement::calculate_routing_key(const Vector& key_indices, size_t size = buf.size() - sizeof(int32_t); char size_buf[sizeof(uint16_t)]; - encode_uint16(size_buf, size); + encode_uint16(size_buf, static_cast(size)); routing_key->append(size_buf, sizeof(uint16_t)); routing_key->append(buf.data() + sizeof(int32_t), size); routing_key->push_back(0); diff --git a/src/statement.hpp b/src/statement.hpp index 1670a5080..41a3e70a3 100644 --- a/src/statement.hpp +++ b/src/statement.hpp @@ -59,7 +59,7 @@ class Statement } } - bool has_names_for_values() const { return flags_ & CASS_QUERY_FLAG_NAMES_FOR_VALUES; } + bool has_names_for_values() const { return (flags_ & CASS_QUERY_FLAG_NAMES_FOR_VALUES) != 0; } int32_t page_size() const { return page_size_; } diff --git a/src/supported_response.cpp b/src/supported_response.cpp index daba03728..53c0035ec 100644 --- a/src/supported_response.cpp +++ b/src/supported_response.cpp @@ -16,30 +16,29 @@ #include "supported_response.hpp" +#include "logger.hpp" #include "serialization.hpp" +#include "utils.hpp" +#include + +using namespace datastax; +using namespace datastax::internal; using namespace datastax::internal::core; bool SupportedResponse::decode(Decoder& decoder) { decoder.set_type("supported"); - StringMultimap supported; - - CHECK_RESULT(decoder.decode_string_multimap(supported)); + StringMultimap supported_options; + CHECK_RESULT(decoder.decode_string_multimap(supported_options)); decoder.maybe_log_remaining(); - StringMultimap::const_iterator it = supported.find("COMPRESSION"); - if (it != supported.end()) { - compression_ = it->second; + // Force keys to be uppercase + for (StringMultimap::iterator it = supported_options.begin(), end = supported_options.end(); + it != end; ++it) { + String key = it->first; + std::transform(key.begin(), key.end(), key.begin(), toupper); + supported_options_[key] = it->second; } - it = supported.find("CQL_VERSIONS"); - if (it != supported.end()) { - cql_versions_ = it->second; - } - - it = supported.find("PROTOCOL_VERSIONS"); - if (it != supported.end()) { - protocol_versions_ = it->second; - } return true; } diff --git a/src/supported_response.hpp b/src/supported_response.hpp index ddc8a1fe4..c1f951119 100644 --- a/src/supported_response.hpp +++ b/src/supported_response.hpp @@ -31,14 +31,15 @@ class SupportedResponse : public Response { virtual bool decode(Decoder& decoder); - const Vector compression() { return compression_; } - const Vector cql_versions() { return cql_versions_; } - const Vector protocol_versions() { return protocol_versions_; } + /** + * Get the supported options provided by the server. + * + * @return Supported options; all keys are normalized (uppercase). 
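   *
   * Minimal lookup sketch (hypothetical caller; the uppercase key is reliable
   * because decode() folds every key to uppercase):
   *
   *   const StringMultimap& options = response->supported_options();
   *   StringMultimap::const_iterator it = options.find("COMPRESSION");
   *   if (it != options.end()) {
   *     // it->second holds the server-advertised values for that option.
   *   }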
+ */ + const StringMultimap& supported_options() const { return supported_options_; } private: - Vector compression_; - Vector cql_versions_; - Vector protocol_versions_; + StringMultimap supported_options_; }; }}} // namespace datastax::internal::core diff --git a/src/tcp_connector.hpp b/src/tcp_connector.hpp index bf9f1a896..0408d86ed 100644 --- a/src/tcp_connector.hpp +++ b/src/tcp_connector.hpp @@ -63,7 +63,8 @@ class TcpConnector : public RefCounted { callback_ = callback; status_ = CONNECTING; - rc = uv_tcp_connect(&req_, handle, static_cast(address_).addr(), on_connect); + Address::SocketStorage storage; + rc = uv_tcp_connect(&req_, handle, address_.to_sockaddr(&storage), on_connect); if (rc != 0) { status_ = FAILED_BAD_PARAM; diff --git a/src/testing.cpp b/src/testing.cpp index c5d630c24..5f1e8c851 100644 --- a/src/testing.cpp +++ b/src/testing.cpp @@ -37,7 +37,7 @@ String get_host_from_future(CassFuture* future) { return ""; } ResponseFuture* response_future = static_cast(future->from()); - return response_future->address().to_string(); + return response_future->address().hostname_or_address(); } unsigned get_connect_timeout_from_cluster(CassCluster* cluster) { @@ -49,14 +49,14 @@ int get_port_from_cluster(CassCluster* cluster) { return cluster->config().port( String get_contact_points_from_cluster(CassCluster* cluster) { String str; - const ContactPointList& contact_points = cluster->config().contact_points(); + const AddressVec& contact_points = cluster->config().contact_points(); - for (ContactPointList::const_iterator it = contact_points.begin(), end = contact_points.end(); + for (AddressVec::const_iterator it = contact_points.begin(), end = contact_points.end(); it != end; ++it) { if (str.size() > 0) { str.push_back(','); } - str.append(*it); + str.append(it->hostname_or_address()); } return str; @@ -69,8 +69,8 @@ int64_t create_murmur3_hash_from_string(const String& value) { uint64_t get_time_since_epoch_in_ms() { return internal::get_time_since_epoch_ms(); } uint64_t get_host_latency_average(CassSession* session, String ip_address, int port) { - Address address; - if (Address::from_string(ip_address, port, &address)) { + Address address(ip_address, port); + if (address.is_valid()) { Host::Ptr host(session->cluster()->find_host(address)); return host ? host->get_current_average().average : 0; } diff --git a/src/third_party/http-parser/AUTHORS b/src/third_party/http-parser/AUTHORS new file mode 100644 index 000000000..5323b685c --- /dev/null +++ b/src/third_party/http-parser/AUTHORS @@ -0,0 +1,68 @@ +# Authors ordered by first contribution. 
+Ryan Dahl +Jeremy Hinegardner +Sergey Shepelev +Joe Damato +tomika +Phoenix Sol +Cliff Frey +Ewen Cheslack-Postava +Santiago Gala +Tim Becker +Jeff Terrace +Ben Noordhuis +Nathan Rajlich +Mark Nottingham +Aman Gupta +Tim Becker +Sean Cunningham +Peter Griess +Salman Haq +Cliff Frey +Jon Kolb +Fouad Mardini +Paul Querna +Felix Geisendörfer +koichik +Andre Caron +Ivo Raisr +James McLaughlin +David Gwynne +Thomas LE ROUX +Randy Rizun +Andre Louis Caron +Simon Zimmermann +Erik Dubbelboer +Martell Malone +Bertrand Paquet +BogDan Vatra +Peter Faiman +Corey Richardson +Tóth Tamás +Cam Swords +Chris Dickinson +Uli Köhler +Charlie Somerville +Patrik Stutz +Fedor Indutny +runner +Alexis Campailla +David Wragg +Vinnie Falco +Alex Butum +Rex Feng +Alex Kocharin +Mark Koopman +Helge Heß +Alexis La Goutte +George Miroshnykov +Maciej Małecki +Marc O'Morain +Jeff Pinner +Timothy J Fontaine +Akagi201 +Romain Giraud +Jay Satiro +Arne Steen +Kjell Schubert +Olivier Mengué diff --git a/src/third_party/http-parser/LICENSE-MIT b/src/third_party/http-parser/LICENSE-MIT new file mode 100644 index 000000000..1ec0ab4e1 --- /dev/null +++ b/src/third_party/http-parser/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright Joyent, Inc. and other Node contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/src/third_party/http-parser/README b/src/third_party/http-parser/README new file mode 100644 index 000000000..a8a997db7 --- /dev/null +++ b/src/third_party/http-parser/README @@ -0,0 +1,61 @@ +SHA: 5c17dad400e45c5a442a63f250fff2638d144682 +URL: https://github.com/nodejs/http-parser +Commit Date: 2019/04/16 + +git clone https://github.com/nodejs/http-parser.git /tmp/http-parser +cp -r /tmp/http-parser . 
+rm -f .gitignore .mailmap .travis.yml + +diff --git a/src/third_party/http-parser/http_parser.h b/src/third_party/http-parser/http_parser.h +index 16b5281d..657d011f 100644 +--- a/src/third_party/http-parser/http_parser.h ++++ b/src/third_party/http-parser/http_parser.h +@@ -156,6 +156,9 @@ enum http_status + #define XX(num, name, string) HTTP_STATUS_##name = num, + HTTP_STATUS_MAP(XX) + #undef XX ++ /* @cond IGNORE */ ++ HTTP_STATUS_MAP_LAST_ENTRY ++ /* @endcond */ + }; + + +@@ -209,6 +212,9 @@ enum http_method + #define XX(num, name, string) HTTP_##name = num, + HTTP_METHOD_MAP(XX) + #undef XX ++ /* @cond IGNORE */ ++ HTTP_METHOD_MAP_LAST_ENTRY ++ /* @endcond */ + }; + + +@@ -282,6 +288,9 @@ enum flags + #define HTTP_ERRNO_GEN(n, s) HPE_##n, + enum http_errno { + HTTP_ERRNO_MAP(HTTP_ERRNO_GEN) ++ /* @cond IGNORE */ ++ HTTP_ERRNO_MAP_LAST_ENTRY ++ /* @endcond */ + }; + #undef HTTP_ERRNO_GEN + +Copyright Joyent, Inc. and other Node contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/src/third_party/http-parser/README.md b/src/third_party/http-parser/README.md new file mode 100644 index 000000000..b265d7171 --- /dev/null +++ b/src/third_party/http-parser/README.md @@ -0,0 +1,246 @@ +HTTP Parser +=========== + +[![Build Status](https://api.travis-ci.org/nodejs/http-parser.svg?branch=master)](https://travis-ci.org/nodejs/http-parser) + +This is a parser for HTTP messages written in C. It parses both requests and +responses. The parser is designed to be used in performance HTTP +applications. It does not make any syscalls nor allocations, it does not +buffer data, it can be interrupted at anytime. Depending on your +architecture, it only requires about 40 bytes of data per message +stream (in a web server that is per connection). + +Features: + + * No dependencies + * Handles persistent streams (keep-alive). + * Decodes chunked encoding. + * Upgrade support + * Defends against buffer overflow attacks. + +The parser extracts the following information from HTTP messages: + + * Header fields and values + * Content-Length + * Request method + * Response status code + * Transfer-Encoding + * HTTP version + * Request URL + * Message body + + +Usage +----- + +One `http_parser` object is used per TCP connection. Initialize the struct +using `http_parser_init()` and set the callbacks. That might look something +like this for a request parser: +```c +http_parser_settings settings; +settings.on_url = my_url_callback; +settings.on_header_field = my_header_field_callback; +/* ... 
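   set any remaining callbacks here; zeroing the struct up front keeps unused
   callback pointers NULL, e.g. memset(&settings, 0, sizeof(settings)) before
   the assignments above (contrib/parsertrace.c in this import does exactly
   that)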
*/ + +http_parser *parser = malloc(sizeof(http_parser)); +http_parser_init(parser, HTTP_REQUEST); +parser->data = my_socket; +``` + +When data is received on the socket execute the parser and check for errors. + +```c +size_t len = 80*1024, nparsed; +char buf[len]; +ssize_t recved; + +recved = recv(fd, buf, len, 0); + +if (recved < 0) { + /* Handle error. */ +} + +/* Start up / continue the parser. + * Note we pass recved==0 to signal that EOF has been received. + */ +nparsed = http_parser_execute(parser, &settings, buf, recved); + +if (parser->upgrade) { + /* handle new protocol */ +} else if (nparsed != recved) { + /* Handle error. Usually just close the connection. */ +} +``` + +`http_parser` needs to know where the end of the stream is. For example, sometimes +servers send responses without Content-Length and expect the client to +consume input (for the body) until EOF. To tell `http_parser` about EOF, give +`0` as the fourth parameter to `http_parser_execute()`. Callbacks and errors +can still be encountered during an EOF, so one must still be prepared +to receive them. + +Scalar valued message information such as `status_code`, `method`, and the +HTTP version are stored in the parser structure. This data is only +temporally stored in `http_parser` and gets reset on each new message. If +this information is needed later, copy it out of the structure during the +`headers_complete` callback. + +The parser decodes the transfer-encoding for both requests and responses +transparently. That is, a chunked encoding is decoded before being sent to +the on_body callback. + + +The Special Problem of Upgrade +------------------------------ + +`http_parser` supports upgrading the connection to a different protocol. An +increasingly common example of this is the WebSocket protocol which sends +a request like + + GET /demo HTTP/1.1 + Upgrade: WebSocket + Connection: Upgrade + Host: example.com + Origin: http://example.com + WebSocket-Protocol: sample + +followed by non-HTTP data. + +(See [RFC6455](https://tools.ietf.org/html/rfc6455) for more information the +WebSocket protocol.) + +To support this, the parser will treat this as a normal HTTP message without a +body, issuing both on_headers_complete and on_message_complete callbacks. However +http_parser_execute() will stop parsing at the end of the headers and return. + +The user is expected to check if `parser->upgrade` has been set to 1 after +`http_parser_execute()` returns. Non-HTTP data begins at the buffer supplied +offset by the return value of `http_parser_execute()`. + + +Callbacks +--------- + +During the `http_parser_execute()` call, the callbacks set in +`http_parser_settings` will be executed. The parser maintains state and +never looks behind, so buffering the data is not necessary. If you need to +save certain data for later usage, you can do that from the callbacks. + +There are two types of callbacks: + +* notification `typedef int (*http_cb) (http_parser*);` + Callbacks: on_message_begin, on_headers_complete, on_message_complete. +* data `typedef int (*http_data_cb) (http_parser*, const char *at, size_t length);` + Callbacks: (requests only) on_url, + (common) on_header_field, on_header_value, on_body; + +Callbacks must return 0 on success. Returning a non-zero value indicates +error to the parser, making it exit immediately. + +For cases where it is necessary to pass local information to/from a callback, +the `http_parser` object's `data` field can be used. 
+An example of such a case is when using threads to handle a socket connection, +parse a request, and then give a response over that socket. By instantiation +of a thread-local struct containing relevant data (e.g. accepted socket, +allocated memory for callbacks to write into, etc), a parser's callbacks are +able to communicate data between the scope of the thread and the scope of the +callback in a threadsafe manner. This allows `http_parser` to be used in +multi-threaded contexts. + +Example: +```c + typedef struct { + socket_t sock; + void* buffer; + int buf_len; + } custom_data_t; + + +int my_url_callback(http_parser* parser, const char *at, size_t length) { + /* access to thread local custom_data_t struct. + Use this access save parsed data for later use into thread local + buffer, or communicate over socket + */ + parser->data; + ... + return 0; +} + +... + +void http_parser_thread(socket_t sock) { + int nparsed = 0; + /* allocate memory for user data */ + custom_data_t *my_data = malloc(sizeof(custom_data_t)); + + /* some information for use by callbacks. + * achieves thread -> callback information flow */ + my_data->sock = sock; + + /* instantiate a thread-local parser */ + http_parser *parser = malloc(sizeof(http_parser)); + http_parser_init(parser, HTTP_REQUEST); /* initialise parser */ + /* this custom data reference is accessible through the reference to the + parser supplied to callback functions */ + parser->data = my_data; + + http_parser_settings settings; /* set up callbacks */ + settings.on_url = my_url_callback; + + /* execute parser */ + nparsed = http_parser_execute(parser, &settings, buf, recved); + + ... + /* parsed information copied from callback. + can now perform action on data copied into thread-local memory from callbacks. + achieves callback -> thread information flow */ + my_data->buffer; + ... +} + +``` + +In case you parse HTTP message in chunks (i.e. `read()` request line +from socket, parse, read half headers, parse, etc) your data callbacks +may be called more than once. `http_parser` guarantees that data pointer is only +valid for the lifetime of callback. You can also `read()` into a heap allocated +buffer to avoid copying memory around if this fits your application. + +Reading headers may be a tricky task if you read/parse headers partially. +Basically, you need to remember whether last header callback was field or value +and apply the following logic: + + (on_header_field and on_header_value shortened to on_h_*) + ------------------------ ------------ -------------------------------------------- + | State (prev. callback) | Callback | Description/action | + ------------------------ ------------ -------------------------------------------- + | nothing (first call) | on_h_field | Allocate new buffer and copy callback data | + | | | into it | + ------------------------ ------------ -------------------------------------------- + | value | on_h_field | New header started. | + | | | Copy current name,value buffers to headers | + | | | list and allocate new buffer for new name | + ------------------------ ------------ -------------------------------------------- + | field | on_h_field | Previous name continues. Reallocate name | + | | | buffer and append callback data to it | + ------------------------ ------------ -------------------------------------------- + | field | on_h_value | Value for current header started. 
Allocate | + | | | new buffer and copy callback data to it | + ------------------------ ------------ -------------------------------------------- + | value | on_h_value | Value continues. Reallocate value buffer | + | | | and append callback data to it | + ------------------------ ------------ -------------------------------------------- + + +Parsing URLs +------------ + +A simplistic zero-copy URL parser is provided as `http_parser_parse_url()`. +Users of this library may wish to use it to parse URLs constructed from +consecutive `on_url` callbacks. + +See examples of reading in headers: + +* [partial example](http://gist.github.com/155877) in C +* [from http-parser tests](http://github.com/joyent/http-parser/blob/37a0ff8/test.c#L403) in C +* [from Node library](http://github.com/joyent/node/blob/842eaf4/src/http.js#L284) in Javascript diff --git a/src/third_party/http-parser/bench.c b/src/third_party/http-parser/bench.c new file mode 100644 index 000000000..678f5556c --- /dev/null +++ b/src/third_party/http-parser/bench.c @@ -0,0 +1,128 @@ +/* Copyright Fedor Indutny. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ +#include "http_parser.h" +#include +#include +#include +#include +#include + +/* 8 gb */ +static const int64_t kBytes = 8LL << 30; + +static const char data[] = + "POST /joyent/http-parser HTTP/1.1\r\n" + "Host: github.com\r\n" + "DNT: 1\r\n" + "Accept-Encoding: gzip, deflate, sdch\r\n" + "Accept-Language: ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4\r\n" + "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) " + "AppleWebKit/537.36 (KHTML, like Gecko) " + "Chrome/39.0.2171.65 Safari/537.36\r\n" + "Accept: text/html,application/xhtml+xml,application/xml;q=0.9," + "image/webp,*/*;q=0.8\r\n" + "Referer: https://github.com/joyent/http-parser\r\n" + "Connection: keep-alive\r\n" + "Transfer-Encoding: chunked\r\n" + "Cache-Control: max-age=0\r\n\r\nb\r\nhello world\r\n0\r\n"; +static const size_t data_len = sizeof(data) - 1; + +static int on_info(http_parser* p) { + return 0; +} + + +static int on_data(http_parser* p, const char *at, size_t length) { + return 0; +} + +static http_parser_settings settings = { + .on_message_begin = on_info, + .on_headers_complete = on_info, + .on_message_complete = on_info, + .on_header_field = on_data, + .on_header_value = on_data, + .on_url = on_data, + .on_status = on_data, + .on_body = on_data +}; + +int bench(int iter_count, int silent) { + struct http_parser parser; + int i; + int err; + struct timeval start; + struct timeval end; + + if (!silent) { + err = gettimeofday(&start, NULL); + assert(err == 0); + } + + fprintf(stderr, "req_len=%d\n", (int) data_len); + for (i = 0; i < iter_count; i++) { + size_t parsed; + http_parser_init(&parser, HTTP_REQUEST); + + parsed = http_parser_execute(&parser, &settings, data, data_len); + assert(parsed == data_len); + } + + if (!silent) { + double elapsed; + double bw; + double total; + + err = gettimeofday(&end, NULL); + assert(err == 0); + + fprintf(stdout, "Benchmark result:\n"); + + elapsed = (double) (end.tv_sec - start.tv_sec) + + (end.tv_usec - start.tv_usec) * 1e-6f; + + total = (double) iter_count * data_len; + bw = (double) total / elapsed; + + fprintf(stdout, "%.2f mb | %.2f mb/s | %.2f req/sec | %.2f s\n", + (double) total / (1024 * 1024), + bw / (1024 * 1024), + (double) iter_count / elapsed, + elapsed); + + fflush(stdout); + } + + return 0; +} + +int main(int argc, char** argv) { + int64_t iterations; + + iterations = kBytes / (int64_t) data_len; + if (argc == 2 && strcmp(argv[1], "infinite") == 0) { + for (;;) + bench(iterations, 1); + return 0; + } else { + return bench(iterations, 0); + } +} diff --git a/src/third_party/http-parser/contrib/parsertrace.c b/src/third_party/http-parser/contrib/parsertrace.c new file mode 100644 index 000000000..3daa7f46a --- /dev/null +++ b/src/third_party/http-parser/contrib/parsertrace.c @@ -0,0 +1,157 @@ +/* Copyright Joyent, Inc. and other Node contributors. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* Dump what the parser finds to stdout as it happen */ + +#include "http_parser.h" +#include +#include +#include + +int on_message_begin(http_parser* _) { + (void)_; + printf("\n***MESSAGE BEGIN***\n\n"); + return 0; +} + +int on_headers_complete(http_parser* _) { + (void)_; + printf("\n***HEADERS COMPLETE***\n\n"); + return 0; +} + +int on_message_complete(http_parser* _) { + (void)_; + printf("\n***MESSAGE COMPLETE***\n\n"); + return 0; +} + +int on_url(http_parser* _, const char* at, size_t length) { + (void)_; + printf("Url: %.*s\n", (int)length, at); + return 0; +} + +int on_header_field(http_parser* _, const char* at, size_t length) { + (void)_; + printf("Header field: %.*s\n", (int)length, at); + return 0; +} + +int on_header_value(http_parser* _, const char* at, size_t length) { + (void)_; + printf("Header value: %.*s\n", (int)length, at); + return 0; +} + +int on_body(http_parser* _, const char* at, size_t length) { + (void)_; + printf("Body: %.*s\n", (int)length, at); + return 0; +} + +void usage(const char* name) { + fprintf(stderr, + "Usage: %s $type $filename\n" + " type: -x, where x is one of {r,b,q}\n" + " parses file as a Response, reQuest, or Both\n", + name); + exit(EXIT_FAILURE); +} + +int main(int argc, char* argv[]) { + enum http_parser_type file_type; + + if (argc != 3) { + usage(argv[0]); + } + + char* type = argv[1]; + if (type[0] != '-') { + usage(argv[0]); + } + + switch (type[1]) { + /* in the case of "-", type[1] will be NUL */ + case 'r': + file_type = HTTP_RESPONSE; + break; + case 'q': + file_type = HTTP_REQUEST; + break; + case 'b': + file_type = HTTP_BOTH; + break; + default: + usage(argv[0]); + } + + char* filename = argv[2]; + FILE* file = fopen(filename, "r"); + if (file == NULL) { + perror("fopen"); + goto fail; + } + + fseek(file, 0, SEEK_END); + long file_length = ftell(file); + if (file_length == -1) { + perror("ftell"); + goto fail; + } + fseek(file, 0, SEEK_SET); + + char* data = malloc(file_length); + if (fread(data, 1, file_length, file) != (size_t)file_length) { + fprintf(stderr, "couldn't read entire file\n"); + free(data); + goto fail; + } + + http_parser_settings settings; + memset(&settings, 0, sizeof(settings)); + settings.on_message_begin = on_message_begin; + settings.on_url = on_url; + settings.on_header_field = on_header_field; + settings.on_header_value = on_header_value; + settings.on_headers_complete = on_headers_complete; + settings.on_body = on_body; + settings.on_message_complete = on_message_complete; + + http_parser parser; + http_parser_init(&parser, file_type); + size_t nparsed = http_parser_execute(&parser, &settings, data, file_length); + free(data); + + if (nparsed != (size_t)file_length) { + fprintf(stderr, + "Error: %s (%s)\n", + http_errno_description(HTTP_PARSER_ERRNO(&parser)), + http_errno_name(HTTP_PARSER_ERRNO(&parser))); + goto fail; + } + + return EXIT_SUCCESS; + +fail: + fclose(file); + return EXIT_FAILURE; +} diff --git a/src/third_party/http-parser/contrib/url_parser.c b/src/third_party/http-parser/contrib/url_parser.c 
new file mode 100644 index 000000000..f235bed9e --- /dev/null +++ b/src/third_party/http-parser/contrib/url_parser.c @@ -0,0 +1,47 @@ +#include "http_parser.h" +#include +#include + +void +dump_url (const char *url, const struct http_parser_url *u) +{ + unsigned int i; + + printf("\tfield_set: 0x%x, port: %u\n", u->field_set, u->port); + for (i = 0; i < UF_MAX; i++) { + if ((u->field_set & (1 << i)) == 0) { + printf("\tfield_data[%u]: unset\n", i); + continue; + } + + printf("\tfield_data[%u]: off: %u, len: %u, part: %.*s\n", + i, + u->field_data[i].off, + u->field_data[i].len, + u->field_data[i].len, + url + u->field_data[i].off); + } +} + +int main(int argc, char ** argv) { + struct http_parser_url u; + int len, connect, result; + + if (argc != 3) { + printf("Syntax : %s connect|get url\n", argv[0]); + return 1; + } + len = strlen(argv[2]); + connect = strcmp("connect", argv[1]) == 0 ? 1 : 0; + printf("Parsing %s, connect %d\n", argv[2], connect); + + http_parser_url_init(&u); + result = http_parser_parse_url(argv[2], len, connect, &u); + if (result != 0) { + printf("Parse error : %d\n", result); + return result; + } + printf("Parse ok, result : \n"); + dump_url(argv[2], &u); + return 0; +} diff --git a/src/third_party/http-parser/http_parser.c b/src/third_party/http-parser/http_parser.c new file mode 100644 index 000000000..c353f680a --- /dev/null +++ b/src/third_party/http-parser/http_parser.c @@ -0,0 +1,2498 @@ +/* Copyright Joyent, Inc. and other Node contributors. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ +#include "http_parser.h" +#include +#include +#include +#include +#include + +static uint32_t max_header_size = HTTP_MAX_HEADER_SIZE; + +#ifndef ULLONG_MAX +# define ULLONG_MAX ((uint64_t) -1) /* 2^64-1 */ +#endif + +#ifndef MIN +# define MIN(a,b) ((a) < (b) ? (a) : (b)) +#endif + +#ifndef ARRAY_SIZE +# define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) +#endif + +#ifndef BIT_AT +# define BIT_AT(a, i) \ + (!!((unsigned int) (a)[(unsigned int) (i) >> 3] & \ + (1 << ((unsigned int) (i) & 7)))) +#endif + +#ifndef ELEM_AT +# define ELEM_AT(a, i, v) ((unsigned int) (i) < ARRAY_SIZE(a) ? 
(a)[(i)] : (v)) +#endif + +#define SET_ERRNO(e) \ +do { \ + parser->nread = nread; \ + parser->http_errno = (e); \ +} while(0) + +#define CURRENT_STATE() p_state +#define UPDATE_STATE(V) p_state = (enum state) (V); +#define RETURN(V) \ +do { \ + parser->nread = nread; \ + parser->state = CURRENT_STATE(); \ + return (V); \ +} while (0); +#define REEXECUTE() \ + goto reexecute; \ + + +#ifdef __GNUC__ +# define LIKELY(X) __builtin_expect(!!(X), 1) +# define UNLIKELY(X) __builtin_expect(!!(X), 0) +#else +# define LIKELY(X) (X) +# define UNLIKELY(X) (X) +#endif + + +/* Run the notify callback FOR, returning ER if it fails */ +#define CALLBACK_NOTIFY_(FOR, ER) \ +do { \ + assert(HTTP_PARSER_ERRNO(parser) == HPE_OK); \ + \ + if (LIKELY(settings->on_##FOR)) { \ + parser->state = CURRENT_STATE(); \ + if (UNLIKELY(0 != settings->on_##FOR(parser))) { \ + SET_ERRNO(HPE_CB_##FOR); \ + } \ + UPDATE_STATE(parser->state); \ + \ + /* We either errored above or got paused; get out */ \ + if (UNLIKELY(HTTP_PARSER_ERRNO(parser) != HPE_OK)) { \ + return (ER); \ + } \ + } \ +} while (0) + +/* Run the notify callback FOR and consume the current byte */ +#define CALLBACK_NOTIFY(FOR) CALLBACK_NOTIFY_(FOR, p - data + 1) + +/* Run the notify callback FOR and don't consume the current byte */ +#define CALLBACK_NOTIFY_NOADVANCE(FOR) CALLBACK_NOTIFY_(FOR, p - data) + +/* Run data callback FOR with LEN bytes, returning ER if it fails */ +#define CALLBACK_DATA_(FOR, LEN, ER) \ +do { \ + assert(HTTP_PARSER_ERRNO(parser) == HPE_OK); \ + \ + if (FOR##_mark) { \ + if (LIKELY(settings->on_##FOR)) { \ + parser->state = CURRENT_STATE(); \ + if (UNLIKELY(0 != \ + settings->on_##FOR(parser, FOR##_mark, (LEN)))) { \ + SET_ERRNO(HPE_CB_##FOR); \ + } \ + UPDATE_STATE(parser->state); \ + \ + /* We either errored above or got paused; get out */ \ + if (UNLIKELY(HTTP_PARSER_ERRNO(parser) != HPE_OK)) { \ + return (ER); \ + } \ + } \ + FOR##_mark = NULL; \ + } \ +} while (0) + +/* Run the data callback FOR and consume the current byte */ +#define CALLBACK_DATA(FOR) \ + CALLBACK_DATA_(FOR, p - FOR##_mark, p - data + 1) + +/* Run the data callback FOR and don't consume the current byte */ +#define CALLBACK_DATA_NOADVANCE(FOR) \ + CALLBACK_DATA_(FOR, p - FOR##_mark, p - data) + +/* Set the mark FOR; non-destructive if mark is already set */ +#define MARK(FOR) \ +do { \ + if (!FOR##_mark) { \ + FOR##_mark = p; \ + } \ +} while (0) + +/* Don't allow the total size of the HTTP headers (including the status + * line) to exceed max_header_size. This check is here to protect + * embedders against denial-of-service attacks where the attacker feeds + * us a never-ending header that the embedder keeps buffering. + * + * This check is arguably the responsibility of embedders but we're doing + * it on the embedder's behalf because most won't bother and this way we + * make the web a little safer. max_header_size is still far bigger + * than any reasonable request or response so this should never affect + * day-to-day operation. 
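 *
 * Embedders that need a different cap can overwrite the max_header_size
 * static above at runtime via http_parser_set_max_header_size(), assuming
 * this vendored revision exposes that setter in http_parser.h.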
+ */ +#define COUNT_HEADER_SIZE(V) \ +do { \ + nread += (uint32_t)(V); \ + if (UNLIKELY(nread > max_header_size)) { \ + SET_ERRNO(HPE_HEADER_OVERFLOW); \ + goto error; \ + } \ +} while (0) + + +#define PROXY_CONNECTION "proxy-connection" +#define CONNECTION "connection" +#define CONTENT_LENGTH "content-length" +#define TRANSFER_ENCODING "transfer-encoding" +#define UPGRADE "upgrade" +#define CHUNKED "chunked" +#define KEEP_ALIVE "keep-alive" +#define CLOSE "close" + + +static const char *method_strings[] = + { +#define XX(num, name, string) #string, + HTTP_METHOD_MAP(XX) +#undef XX + }; + + +/* Tokens as defined by rfc 2616. Also lowercases them. + * token = 1* + * separators = "(" | ")" | "<" | ">" | "@" + * | "," | ";" | ":" | "\" | <"> + * | "/" | "[" | "]" | "?" | "=" + * | "{" | "}" | SP | HT + */ +static const char tokens[256] = { +/* 0 nul 1 soh 2 stx 3 etx 4 eot 5 enq 6 ack 7 bel */ + 0, 0, 0, 0, 0, 0, 0, 0, +/* 8 bs 9 ht 10 nl 11 vt 12 np 13 cr 14 so 15 si */ + 0, 0, 0, 0, 0, 0, 0, 0, +/* 16 dle 17 dc1 18 dc2 19 dc3 20 dc4 21 nak 22 syn 23 etb */ + 0, 0, 0, 0, 0, 0, 0, 0, +/* 24 can 25 em 26 sub 27 esc 28 fs 29 gs 30 rs 31 us */ + 0, 0, 0, 0, 0, 0, 0, 0, +/* 32 sp 33 ! 34 " 35 # 36 $ 37 % 38 & 39 ' */ + ' ', '!', 0, '#', '$', '%', '&', '\'', +/* 40 ( 41 ) 42 * 43 + 44 , 45 - 46 . 47 / */ + 0, 0, '*', '+', 0, '-', '.', 0, +/* 48 0 49 1 50 2 51 3 52 4 53 5 54 6 55 7 */ + '0', '1', '2', '3', '4', '5', '6', '7', +/* 56 8 57 9 58 : 59 ; 60 < 61 = 62 > 63 ? */ + '8', '9', 0, 0, 0, 0, 0, 0, +/* 64 @ 65 A 66 B 67 C 68 D 69 E 70 F 71 G */ + 0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', +/* 72 H 73 I 74 J 75 K 76 L 77 M 78 N 79 O */ + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', +/* 80 P 81 Q 82 R 83 S 84 T 85 U 86 V 87 W */ + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', +/* 88 X 89 Y 90 Z 91 [ 92 \ 93 ] 94 ^ 95 _ */ + 'x', 'y', 'z', 0, 0, 0, '^', '_', +/* 96 ` 97 a 98 b 99 c 100 d 101 e 102 f 103 g */ + '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', +/* 104 h 105 i 106 j 107 k 108 l 109 m 110 n 111 o */ + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', +/* 112 p 113 q 114 r 115 s 116 t 117 u 118 v 119 w */ + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', +/* 120 x 121 y 122 z 123 { 124 | 125 } 126 ~ 127 del */ + 'x', 'y', 'z', 0, '|', 0, '~', 0 }; + + +static const int8_t unhex[256] = + {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1 + ,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1 + ,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1 + , 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1 + ,-1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1 + ,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1 + ,-1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1 + ,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1 + }; + + +#if HTTP_PARSER_STRICT +# define T(v) 0 +#else +# define T(v) v +#endif + + +static const uint8_t normal_url_char[32] = { +/* 0 nul 1 soh 2 stx 3 etx 4 eot 5 enq 6 ack 7 bel */ + 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0, +/* 8 bs 9 ht 10 nl 11 vt 12 np 13 cr 14 so 15 si */ + 0 | T(2) | 0 | 0 | T(16) | 0 | 0 | 0, +/* 16 dle 17 dc1 18 dc2 19 dc3 20 dc4 21 nak 22 syn 23 etb */ + 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0, +/* 24 can 25 em 26 sub 27 esc 28 fs 29 gs 30 rs 31 us */ + 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0, +/* 32 sp 33 ! 34 " 35 # 36 $ 37 % 38 & 39 ' */ + 0 | 2 | 4 | 0 | 16 | 32 | 64 | 128, +/* 40 ( 41 ) 42 * 43 + 44 , 45 - 46 . 47 / */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128, +/* 48 0 49 1 50 2 51 3 52 4 53 5 54 6 55 7 */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128, +/* 56 8 57 9 58 : 59 ; 60 < 61 = 62 > 63 ? 
*/ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 0, +/* 64 @ 65 A 66 B 67 C 68 D 69 E 70 F 71 G */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128, +/* 72 H 73 I 74 J 75 K 76 L 77 M 78 N 79 O */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128, +/* 80 P 81 Q 82 R 83 S 84 T 85 U 86 V 87 W */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128, +/* 88 X 89 Y 90 Z 91 [ 92 \ 93 ] 94 ^ 95 _ */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128, +/* 96 ` 97 a 98 b 99 c 100 d 101 e 102 f 103 g */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128, +/* 104 h 105 i 106 j 107 k 108 l 109 m 110 n 111 o */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128, +/* 112 p 113 q 114 r 115 s 116 t 117 u 118 v 119 w */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128, +/* 120 x 121 y 122 z 123 { 124 | 125 } 126 ~ 127 del */ + 1 | 2 | 4 | 8 | 16 | 32 | 64 | 0, }; + +#undef T + +enum state + { s_dead = 1 /* important that this is > 0 */ + + , s_start_req_or_res + , s_res_or_resp_H + , s_start_res + , s_res_H + , s_res_HT + , s_res_HTT + , s_res_HTTP + , s_res_http_major + , s_res_http_dot + , s_res_http_minor + , s_res_http_end + , s_res_first_status_code + , s_res_status_code + , s_res_status_start + , s_res_status + , s_res_line_almost_done + + , s_start_req + + , s_req_method + , s_req_spaces_before_url + , s_req_schema + , s_req_schema_slash + , s_req_schema_slash_slash + , s_req_server_start + , s_req_server + , s_req_server_with_at + , s_req_path + , s_req_query_string_start + , s_req_query_string + , s_req_fragment_start + , s_req_fragment + , s_req_http_start + , s_req_http_H + , s_req_http_HT + , s_req_http_HTT + , s_req_http_HTTP + , s_req_http_I + , s_req_http_IC + , s_req_http_major + , s_req_http_dot + , s_req_http_minor + , s_req_http_end + , s_req_line_almost_done + + , s_header_field_start + , s_header_field + , s_header_value_discard_ws + , s_header_value_discard_ws_almost_done + , s_header_value_discard_lws + , s_header_value_start + , s_header_value + , s_header_value_lws + + , s_header_almost_done + + , s_chunk_size_start + , s_chunk_size + , s_chunk_parameters + , s_chunk_size_almost_done + + , s_headers_almost_done + , s_headers_done + + /* Important: 's_headers_done' must be the last 'header' state. All + * states beyond this must be 'body' states. It is used for overflow + * checking. See the PARSING_HEADER() macro. 
+ */ + + , s_chunk_data + , s_chunk_data_almost_done + , s_chunk_data_done + + , s_body_identity + , s_body_identity_eof + + , s_message_done + }; + + +#define PARSING_HEADER(state) (state <= s_headers_done) + + +enum header_states + { h_general = 0 + , h_C + , h_CO + , h_CON + + , h_matching_connection + , h_matching_proxy_connection + , h_matching_content_length + , h_matching_transfer_encoding + , h_matching_upgrade + + , h_connection + , h_content_length + , h_content_length_num + , h_content_length_ws + , h_transfer_encoding + , h_upgrade + + , h_matching_transfer_encoding_chunked + , h_matching_connection_token_start + , h_matching_connection_keep_alive + , h_matching_connection_close + , h_matching_connection_upgrade + , h_matching_connection_token + + , h_transfer_encoding_chunked + , h_connection_keep_alive + , h_connection_close + , h_connection_upgrade + }; + +enum http_host_state + { + s_http_host_dead = 1 + , s_http_userinfo_start + , s_http_userinfo + , s_http_host_start + , s_http_host_v6_start + , s_http_host + , s_http_host_v6 + , s_http_host_v6_end + , s_http_host_v6_zone_start + , s_http_host_v6_zone + , s_http_host_port_start + , s_http_host_port +}; + +/* Macros for character classes; depends on strict-mode */ +#define CR '\r' +#define LF '\n' +#define LOWER(c) (unsigned char)(c | 0x20) +#define IS_ALPHA(c) (LOWER(c) >= 'a' && LOWER(c) <= 'z') +#define IS_NUM(c) ((c) >= '0' && (c) <= '9') +#define IS_ALPHANUM(c) (IS_ALPHA(c) || IS_NUM(c)) +#define IS_HEX(c) (IS_NUM(c) || (LOWER(c) >= 'a' && LOWER(c) <= 'f')) +#define IS_MARK(c) ((c) == '-' || (c) == '_' || (c) == '.' || \ + (c) == '!' || (c) == '~' || (c) == '*' || (c) == '\'' || (c) == '(' || \ + (c) == ')') +#define IS_USERINFO_CHAR(c) (IS_ALPHANUM(c) || IS_MARK(c) || (c) == '%' || \ + (c) == ';' || (c) == ':' || (c) == '&' || (c) == '=' || (c) == '+' || \ + (c) == '$' || (c) == ',') + +#define STRICT_TOKEN(c) ((c == ' ') ? 0 : tokens[(unsigned char)c]) + +#if HTTP_PARSER_STRICT +#define TOKEN(c) STRICT_TOKEN(c) +#define IS_URL_CHAR(c) (BIT_AT(normal_url_char, (unsigned char)c)) +#define IS_HOST_CHAR(c) (IS_ALPHANUM(c) || (c) == '.' || (c) == '-') +#else +#define TOKEN(c) tokens[(unsigned char)c] +#define IS_URL_CHAR(c) \ + (BIT_AT(normal_url_char, (unsigned char)c) || ((c) & 0x80)) +#define IS_HOST_CHAR(c) \ + (IS_ALPHANUM(c) || (c) == '.' || (c) == '-' || (c) == '_') +#endif + +/** + * Verify that a char is a valid visible (printable) US-ASCII + * character or %x80-FF + **/ +#define IS_HEADER_CHAR(ch) \ + (ch == CR || ch == LF || ch == 9 || ((unsigned char)ch > 31 && ch != 127)) + +#define start_state (parser->type == HTTP_REQUEST ? s_start_req : s_start_res) + + +#if HTTP_PARSER_STRICT +# define STRICT_CHECK(cond) \ +do { \ + if (cond) { \ + SET_ERRNO(HPE_STRICT); \ + goto error; \ + } \ +} while (0) +# define NEW_MESSAGE() (http_should_keep_alive(parser) ? start_state : s_dead) +#else +# define STRICT_CHECK(cond) +# define NEW_MESSAGE() start_state +#endif + + +/* Map errno values to strings for human-readable output */ +#define HTTP_STRERROR_GEN(n, s) { "HPE_" #n, s }, +static struct { + const char *name; + const char *description; +} http_strerror_tab[] = { + HTTP_ERRNO_MAP(HTTP_STRERROR_GEN) +}; +#undef HTTP_STRERROR_GEN + +int http_message_needs_eof(const http_parser *parser); + +/* Our URL parser. + * + * This is designed to be shared by http_parser_execute() for URL validation, + * hence it has a state transition + byte-for-byte interface. 
In addition, it + * is meant to be embedded in http_parser_parse_url(), which does the dirty + * work of turning state transitions URL components for its API. + * + * This function should only be invoked with non-space characters. It is + * assumed that the caller cares about (and can detect) the transition between + * URL and non-URL states by looking for these. + */ +static enum state +parse_url_char(enum state s, const char ch) +{ + if (ch == ' ' || ch == '\r' || ch == '\n') { + return s_dead; + } + +#if HTTP_PARSER_STRICT + if (ch == '\t' || ch == '\f') { + return s_dead; + } +#endif + + switch (s) { + case s_req_spaces_before_url: + /* Proxied requests are followed by scheme of an absolute URI (alpha). + * All methods except CONNECT are followed by '/' or '*'. + */ + + if (ch == '/' || ch == '*') { + return s_req_path; + } + + if (IS_ALPHA(ch)) { + return s_req_schema; + } + + break; + + case s_req_schema: + if (IS_ALPHA(ch)) { + return s; + } + + if (ch == ':') { + return s_req_schema_slash; + } + + break; + + case s_req_schema_slash: + if (ch == '/') { + return s_req_schema_slash_slash; + } + + break; + + case s_req_schema_slash_slash: + if (ch == '/') { + return s_req_server_start; + } + + break; + + case s_req_server_with_at: + if (ch == '@') { + return s_dead; + } + + /* fall through */ + case s_req_server_start: + case s_req_server: + if (ch == '/') { + return s_req_path; + } + + if (ch == '?') { + return s_req_query_string_start; + } + + if (ch == '@') { + return s_req_server_with_at; + } + + if (IS_USERINFO_CHAR(ch) || ch == '[' || ch == ']') { + return s_req_server; + } + + break; + + case s_req_path: + if (IS_URL_CHAR(ch)) { + return s; + } + + switch (ch) { + case '?': + return s_req_query_string_start; + + case '#': + return s_req_fragment_start; + } + + break; + + case s_req_query_string_start: + case s_req_query_string: + if (IS_URL_CHAR(ch)) { + return s_req_query_string; + } + + switch (ch) { + case '?': + /* allow extra '?' in query string */ + return s_req_query_string; + + case '#': + return s_req_fragment_start; + } + + break; + + case s_req_fragment_start: + if (IS_URL_CHAR(ch)) { + return s_req_fragment; + } + + switch (ch) { + case '?': + return s_req_fragment; + + case '#': + return s; + } + + break; + + case s_req_fragment: + if (IS_URL_CHAR(ch)) { + return s; + } + + switch (ch) { + case '?': + case '#': + return s; + } + + break; + + default: + break; + } + + /* We should never fall out of the switch above unless there's an error */ + return s_dead; +} + +size_t http_parser_execute (http_parser *parser, + const http_parser_settings *settings, + const char *data, + size_t len) +{ + char c, ch; + int8_t unhex_val; + const char *p = data; + const char *header_field_mark = 0; + const char *header_value_mark = 0; + const char *url_mark = 0; + const char *body_mark = 0; + const char *status_mark = 0; + enum state p_state = (enum state) parser->state; + const unsigned int lenient = parser->lenient_http_headers; + uint32_t nread = parser->nread; + + /* We're in an error state. Don't bother doing anything. */ + if (HTTP_PARSER_ERRNO(parser) != HPE_OK) { + return 0; + } + + if (len == 0) { + switch (CURRENT_STATE()) { + case s_body_identity_eof: + /* Use of CALLBACK_NOTIFY() here would erroneously return 1 byte read if + * we got paused. 
+ */ + CALLBACK_NOTIFY_NOADVANCE(message_complete); + return 0; + + case s_dead: + case s_start_req_or_res: + case s_start_res: + case s_start_req: + return 0; + + default: + SET_ERRNO(HPE_INVALID_EOF_STATE); + return 1; + } + } + + + if (CURRENT_STATE() == s_header_field) + header_field_mark = data; + if (CURRENT_STATE() == s_header_value) + header_value_mark = data; + switch (CURRENT_STATE()) { + case s_req_path: + case s_req_schema: + case s_req_schema_slash: + case s_req_schema_slash_slash: + case s_req_server_start: + case s_req_server: + case s_req_server_with_at: + case s_req_query_string_start: + case s_req_query_string: + case s_req_fragment_start: + case s_req_fragment: + url_mark = data; + break; + case s_res_status: + status_mark = data; + break; + default: + break; + } + + for (p=data; p != data + len; p++) { + ch = *p; + + if (PARSING_HEADER(CURRENT_STATE())) + COUNT_HEADER_SIZE(1); + +reexecute: + switch (CURRENT_STATE()) { + + case s_dead: + /* this state is used after a 'Connection: close' message + * the parser will error out if it reads another message + */ + if (LIKELY(ch == CR || ch == LF)) + break; + + SET_ERRNO(HPE_CLOSED_CONNECTION); + goto error; + + case s_start_req_or_res: + { + if (ch == CR || ch == LF) + break; + parser->flags = 0; + parser->content_length = ULLONG_MAX; + + if (ch == 'H') { + UPDATE_STATE(s_res_or_resp_H); + + CALLBACK_NOTIFY(message_begin); + } else { + parser->type = HTTP_REQUEST; + UPDATE_STATE(s_start_req); + REEXECUTE(); + } + + break; + } + + case s_res_or_resp_H: + if (ch == 'T') { + parser->type = HTTP_RESPONSE; + UPDATE_STATE(s_res_HT); + } else { + if (UNLIKELY(ch != 'E')) { + SET_ERRNO(HPE_INVALID_CONSTANT); + goto error; + } + + parser->type = HTTP_REQUEST; + parser->method = HTTP_HEAD; + parser->index = 2; + UPDATE_STATE(s_req_method); + } + break; + + case s_start_res: + { + if (ch == CR || ch == LF) + break; + parser->flags = 0; + parser->content_length = ULLONG_MAX; + + if (ch == 'H') { + UPDATE_STATE(s_res_H); + } else { + SET_ERRNO(HPE_INVALID_CONSTANT); + goto error; + } + + CALLBACK_NOTIFY(message_begin); + break; + } + + case s_res_H: + STRICT_CHECK(ch != 'T'); + UPDATE_STATE(s_res_HT); + break; + + case s_res_HT: + STRICT_CHECK(ch != 'T'); + UPDATE_STATE(s_res_HTT); + break; + + case s_res_HTT: + STRICT_CHECK(ch != 'P'); + UPDATE_STATE(s_res_HTTP); + break; + + case s_res_HTTP: + STRICT_CHECK(ch != '/'); + UPDATE_STATE(s_res_http_major); + break; + + case s_res_http_major: + if (UNLIKELY(!IS_NUM(ch))) { + SET_ERRNO(HPE_INVALID_VERSION); + goto error; + } + + parser->http_major = ch - '0'; + UPDATE_STATE(s_res_http_dot); + break; + + case s_res_http_dot: + { + if (UNLIKELY(ch != '.')) { + SET_ERRNO(HPE_INVALID_VERSION); + goto error; + } + + UPDATE_STATE(s_res_http_minor); + break; + } + + case s_res_http_minor: + if (UNLIKELY(!IS_NUM(ch))) { + SET_ERRNO(HPE_INVALID_VERSION); + goto error; + } + + parser->http_minor = ch - '0'; + UPDATE_STATE(s_res_http_end); + break; + + case s_res_http_end: + { + if (UNLIKELY(ch != ' ')) { + SET_ERRNO(HPE_INVALID_VERSION); + goto error; + } + + UPDATE_STATE(s_res_first_status_code); + break; + } + + case s_res_first_status_code: + { + if (!IS_NUM(ch)) { + if (ch == ' ') { + break; + } + + SET_ERRNO(HPE_INVALID_STATUS); + goto error; + } + parser->status_code = ch - '0'; + UPDATE_STATE(s_res_status_code); + break; + } + + case s_res_status_code: + { + if (!IS_NUM(ch)) { + switch (ch) { + case ' ': + UPDATE_STATE(s_res_status_start); + break; + case CR: + case LF: + 
UPDATE_STATE(s_res_status_start); + REEXECUTE(); + break; + default: + SET_ERRNO(HPE_INVALID_STATUS); + goto error; + } + break; + } + + parser->status_code *= 10; + parser->status_code += ch - '0'; + + if (UNLIKELY(parser->status_code > 999)) { + SET_ERRNO(HPE_INVALID_STATUS); + goto error; + } + + break; + } + + case s_res_status_start: + { + MARK(status); + UPDATE_STATE(s_res_status); + parser->index = 0; + + if (ch == CR || ch == LF) + REEXECUTE(); + + break; + } + + case s_res_status: + if (ch == CR) { + UPDATE_STATE(s_res_line_almost_done); + CALLBACK_DATA(status); + break; + } + + if (ch == LF) { + UPDATE_STATE(s_header_field_start); + CALLBACK_DATA(status); + break; + } + + break; + + case s_res_line_almost_done: + STRICT_CHECK(ch != LF); + UPDATE_STATE(s_header_field_start); + break; + + case s_start_req: + { + if (ch == CR || ch == LF) + break; + parser->flags = 0; + parser->content_length = ULLONG_MAX; + + if (UNLIKELY(!IS_ALPHA(ch))) { + SET_ERRNO(HPE_INVALID_METHOD); + goto error; + } + + parser->method = (enum http_method) 0; + parser->index = 1; + switch (ch) { + case 'A': parser->method = HTTP_ACL; break; + case 'B': parser->method = HTTP_BIND; break; + case 'C': parser->method = HTTP_CONNECT; /* or COPY, CHECKOUT */ break; + case 'D': parser->method = HTTP_DELETE; break; + case 'G': parser->method = HTTP_GET; break; + case 'H': parser->method = HTTP_HEAD; break; + case 'L': parser->method = HTTP_LOCK; /* or LINK */ break; + case 'M': parser->method = HTTP_MKCOL; /* or MOVE, MKACTIVITY, MERGE, M-SEARCH, MKCALENDAR */ break; + case 'N': parser->method = HTTP_NOTIFY; break; + case 'O': parser->method = HTTP_OPTIONS; break; + case 'P': parser->method = HTTP_POST; + /* or PROPFIND|PROPPATCH|PUT|PATCH|PURGE */ + break; + case 'R': parser->method = HTTP_REPORT; /* or REBIND */ break; + case 'S': parser->method = HTTP_SUBSCRIBE; /* or SEARCH, SOURCE */ break; + case 'T': parser->method = HTTP_TRACE; break; + case 'U': parser->method = HTTP_UNLOCK; /* or UNSUBSCRIBE, UNBIND, UNLINK */ break; + default: + SET_ERRNO(HPE_INVALID_METHOD); + goto error; + } + UPDATE_STATE(s_req_method); + + CALLBACK_NOTIFY(message_begin); + + break; + } + + case s_req_method: + { + const char *matcher; + if (UNLIKELY(ch == '\0')) { + SET_ERRNO(HPE_INVALID_METHOD); + goto error; + } + + matcher = method_strings[parser->method]; + if (ch == ' ' && matcher[parser->index] == '\0') { + UPDATE_STATE(s_req_spaces_before_url); + } else if (ch == matcher[parser->index]) { + ; /* nada */ + } else if ((ch >= 'A' && ch <= 'Z') || ch == '-') { + + switch (parser->method << 16 | parser->index << 8 | ch) { +#define XX(meth, pos, ch, new_meth) \ + case (HTTP_##meth << 16 | pos << 8 | ch): \ + parser->method = HTTP_##new_meth; break; + + XX(POST, 1, 'U', PUT) + XX(POST, 1, 'A', PATCH) + XX(POST, 1, 'R', PROPFIND) + XX(PUT, 2, 'R', PURGE) + XX(CONNECT, 1, 'H', CHECKOUT) + XX(CONNECT, 2, 'P', COPY) + XX(MKCOL, 1, 'O', MOVE) + XX(MKCOL, 1, 'E', MERGE) + XX(MKCOL, 1, '-', MSEARCH) + XX(MKCOL, 2, 'A', MKACTIVITY) + XX(MKCOL, 3, 'A', MKCALENDAR) + XX(SUBSCRIBE, 1, 'E', SEARCH) + XX(SUBSCRIBE, 1, 'O', SOURCE) + XX(REPORT, 2, 'B', REBIND) + XX(PROPFIND, 4, 'P', PROPPATCH) + XX(LOCK, 1, 'I', LINK) + XX(UNLOCK, 2, 'S', UNSUBSCRIBE) + XX(UNLOCK, 2, 'B', UNBIND) + XX(UNLOCK, 3, 'I', UNLINK) +#undef XX + default: + SET_ERRNO(HPE_INVALID_METHOD); + goto error; + } + } else { + SET_ERRNO(HPE_INVALID_METHOD); + goto error; + } + + ++parser->index; + break; + } + + case s_req_spaces_before_url: + { + if (ch == ' ') break; + + MARK(url); 
+ if (parser->method == HTTP_CONNECT) { + UPDATE_STATE(s_req_server_start); + } + + UPDATE_STATE(parse_url_char(CURRENT_STATE(), ch)); + if (UNLIKELY(CURRENT_STATE() == s_dead)) { + SET_ERRNO(HPE_INVALID_URL); + goto error; + } + + break; + } + + case s_req_schema: + case s_req_schema_slash: + case s_req_schema_slash_slash: + case s_req_server_start: + { + switch (ch) { + /* No whitespace allowed here */ + case ' ': + case CR: + case LF: + SET_ERRNO(HPE_INVALID_URL); + goto error; + default: + UPDATE_STATE(parse_url_char(CURRENT_STATE(), ch)); + if (UNLIKELY(CURRENT_STATE() == s_dead)) { + SET_ERRNO(HPE_INVALID_URL); + goto error; + } + } + + break; + } + + case s_req_server: + case s_req_server_with_at: + case s_req_path: + case s_req_query_string_start: + case s_req_query_string: + case s_req_fragment_start: + case s_req_fragment: + { + switch (ch) { + case ' ': + UPDATE_STATE(s_req_http_start); + CALLBACK_DATA(url); + break; + case CR: + case LF: + parser->http_major = 0; + parser->http_minor = 9; + UPDATE_STATE((ch == CR) ? + s_req_line_almost_done : + s_header_field_start); + CALLBACK_DATA(url); + break; + default: + UPDATE_STATE(parse_url_char(CURRENT_STATE(), ch)); + if (UNLIKELY(CURRENT_STATE() == s_dead)) { + SET_ERRNO(HPE_INVALID_URL); + goto error; + } + } + break; + } + + case s_req_http_start: + switch (ch) { + case ' ': + break; + case 'H': + UPDATE_STATE(s_req_http_H); + break; + case 'I': + if (parser->method == HTTP_SOURCE) { + UPDATE_STATE(s_req_http_I); + break; + } + /* fall through */ + default: + SET_ERRNO(HPE_INVALID_CONSTANT); + goto error; + } + break; + + case s_req_http_H: + STRICT_CHECK(ch != 'T'); + UPDATE_STATE(s_req_http_HT); + break; + + case s_req_http_HT: + STRICT_CHECK(ch != 'T'); + UPDATE_STATE(s_req_http_HTT); + break; + + case s_req_http_HTT: + STRICT_CHECK(ch != 'P'); + UPDATE_STATE(s_req_http_HTTP); + break; + + case s_req_http_I: + STRICT_CHECK(ch != 'C'); + UPDATE_STATE(s_req_http_IC); + break; + + case s_req_http_IC: + STRICT_CHECK(ch != 'E'); + UPDATE_STATE(s_req_http_HTTP); /* Treat "ICE" as "HTTP". 
*/ + break; + + case s_req_http_HTTP: + STRICT_CHECK(ch != '/'); + UPDATE_STATE(s_req_http_major); + break; + + case s_req_http_major: + if (UNLIKELY(!IS_NUM(ch))) { + SET_ERRNO(HPE_INVALID_VERSION); + goto error; + } + + parser->http_major = ch - '0'; + UPDATE_STATE(s_req_http_dot); + break; + + case s_req_http_dot: + { + if (UNLIKELY(ch != '.')) { + SET_ERRNO(HPE_INVALID_VERSION); + goto error; + } + + UPDATE_STATE(s_req_http_minor); + break; + } + + case s_req_http_minor: + if (UNLIKELY(!IS_NUM(ch))) { + SET_ERRNO(HPE_INVALID_VERSION); + goto error; + } + + parser->http_minor = ch - '0'; + UPDATE_STATE(s_req_http_end); + break; + + case s_req_http_end: + { + if (ch == CR) { + UPDATE_STATE(s_req_line_almost_done); + break; + } + + if (ch == LF) { + UPDATE_STATE(s_header_field_start); + break; + } + + SET_ERRNO(HPE_INVALID_VERSION); + goto error; + break; + } + + /* end of request line */ + case s_req_line_almost_done: + { + if (UNLIKELY(ch != LF)) { + SET_ERRNO(HPE_LF_EXPECTED); + goto error; + } + + UPDATE_STATE(s_header_field_start); + break; + } + + case s_header_field_start: + { + if (ch == CR) { + UPDATE_STATE(s_headers_almost_done); + break; + } + + if (ch == LF) { + /* they might be just sending \n instead of \r\n so this would be + * the second \n to denote the end of headers*/ + UPDATE_STATE(s_headers_almost_done); + REEXECUTE(); + } + + c = TOKEN(ch); + + if (UNLIKELY(!c)) { + SET_ERRNO(HPE_INVALID_HEADER_TOKEN); + goto error; + } + + MARK(header_field); + + parser->index = 0; + UPDATE_STATE(s_header_field); + + switch (c) { + case 'c': + parser->header_state = h_C; + break; + + case 'p': + parser->header_state = h_matching_proxy_connection; + break; + + case 't': + parser->header_state = h_matching_transfer_encoding; + break; + + case 'u': + parser->header_state = h_matching_upgrade; + break; + + default: + parser->header_state = h_general; + break; + } + break; + } + + case s_header_field: + { + const char* start = p; + for (; p != data + len; p++) { + ch = *p; + c = TOKEN(ch); + + if (!c) + break; + + switch (parser->header_state) { + case h_general: { + size_t left = data + len - p; + const char* pe = p + MIN(left, max_header_size); + while (p+1 < pe && TOKEN(p[1])) { + p++; + } + break; + } + + case h_C: + parser->index++; + parser->header_state = (c == 'o' ? h_CO : h_general); + break; + + case h_CO: + parser->index++; + parser->header_state = (c == 'n' ? 
h_CON : h_general); + break; + + case h_CON: + parser->index++; + switch (c) { + case 'n': + parser->header_state = h_matching_connection; + break; + case 't': + parser->header_state = h_matching_content_length; + break; + default: + parser->header_state = h_general; + break; + } + break; + + /* connection */ + + case h_matching_connection: + parser->index++; + if (parser->index > sizeof(CONNECTION)-1 + || c != CONNECTION[parser->index]) { + parser->header_state = h_general; + } else if (parser->index == sizeof(CONNECTION)-2) { + parser->header_state = h_connection; + } + break; + + /* proxy-connection */ + + case h_matching_proxy_connection: + parser->index++; + if (parser->index > sizeof(PROXY_CONNECTION)-1 + || c != PROXY_CONNECTION[parser->index]) { + parser->header_state = h_general; + } else if (parser->index == sizeof(PROXY_CONNECTION)-2) { + parser->header_state = h_connection; + } + break; + + /* content-length */ + + case h_matching_content_length: + parser->index++; + if (parser->index > sizeof(CONTENT_LENGTH)-1 + || c != CONTENT_LENGTH[parser->index]) { + parser->header_state = h_general; + } else if (parser->index == sizeof(CONTENT_LENGTH)-2) { + parser->header_state = h_content_length; + } + break; + + /* transfer-encoding */ + + case h_matching_transfer_encoding: + parser->index++; + if (parser->index > sizeof(TRANSFER_ENCODING)-1 + || c != TRANSFER_ENCODING[parser->index]) { + parser->header_state = h_general; + } else if (parser->index == sizeof(TRANSFER_ENCODING)-2) { + parser->header_state = h_transfer_encoding; + } + break; + + /* upgrade */ + + case h_matching_upgrade: + parser->index++; + if (parser->index > sizeof(UPGRADE)-1 + || c != UPGRADE[parser->index]) { + parser->header_state = h_general; + } else if (parser->index == sizeof(UPGRADE)-2) { + parser->header_state = h_upgrade; + } + break; + + case h_connection: + case h_content_length: + case h_transfer_encoding: + case h_upgrade: + if (ch != ' ') parser->header_state = h_general; + break; + + default: + assert(0 && "Unknown header_state"); + break; + } + } + + if (p == data + len) { + --p; + COUNT_HEADER_SIZE(p - start); + break; + } + + COUNT_HEADER_SIZE(p - start); + + if (ch == ':') { + UPDATE_STATE(s_header_value_discard_ws); + CALLBACK_DATA(header_field); + break; + } + + SET_ERRNO(HPE_INVALID_HEADER_TOKEN); + goto error; + } + + case s_header_value_discard_ws: + if (ch == ' ' || ch == '\t') break; + + if (ch == CR) { + UPDATE_STATE(s_header_value_discard_ws_almost_done); + break; + } + + if (ch == LF) { + UPDATE_STATE(s_header_value_discard_lws); + break; + } + + /* fall through */ + + case s_header_value_start: + { + MARK(header_value); + + UPDATE_STATE(s_header_value); + parser->index = 0; + + c = LOWER(ch); + + switch (parser->header_state) { + case h_upgrade: + parser->flags |= F_UPGRADE; + parser->header_state = h_general; + break; + + case h_transfer_encoding: + /* looking for 'Transfer-Encoding: chunked' */ + if ('c' == c) { + parser->header_state = h_matching_transfer_encoding_chunked; + } else { + parser->header_state = h_general; + } + break; + + case h_content_length: + if (UNLIKELY(!IS_NUM(ch))) { + SET_ERRNO(HPE_INVALID_CONTENT_LENGTH); + goto error; + } + + if (parser->flags & F_CONTENTLENGTH) { + SET_ERRNO(HPE_UNEXPECTED_CONTENT_LENGTH); + goto error; + } + + parser->flags |= F_CONTENTLENGTH; + parser->content_length = ch - '0'; + parser->header_state = h_content_length_num; + break; + + /* when obsolete line folding is encountered for content length + * continue to the s_header_value state 
*/ + case h_content_length_ws: + break; + + case h_connection: + /* looking for 'Connection: keep-alive' */ + if (c == 'k') { + parser->header_state = h_matching_connection_keep_alive; + /* looking for 'Connection: close' */ + } else if (c == 'c') { + parser->header_state = h_matching_connection_close; + } else if (c == 'u') { + parser->header_state = h_matching_connection_upgrade; + } else { + parser->header_state = h_matching_connection_token; + } + break; + + /* Multi-value `Connection` header */ + case h_matching_connection_token_start: + break; + + default: + parser->header_state = h_general; + break; + } + break; + } + + case s_header_value: + { + const char* start = p; + enum header_states h_state = (enum header_states) parser->header_state; + for (; p != data + len; p++) { + ch = *p; + if (ch == CR) { + UPDATE_STATE(s_header_almost_done); + parser->header_state = h_state; + CALLBACK_DATA(header_value); + break; + } + + if (ch == LF) { + UPDATE_STATE(s_header_almost_done); + COUNT_HEADER_SIZE(p - start); + parser->header_state = h_state; + CALLBACK_DATA_NOADVANCE(header_value); + REEXECUTE(); + } + + if (!lenient && !IS_HEADER_CHAR(ch)) { + SET_ERRNO(HPE_INVALID_HEADER_TOKEN); + goto error; + } + + c = LOWER(ch); + + switch (h_state) { + case h_general: + { + size_t left = data + len - p; + const char* pe = p + MIN(left, max_header_size); + + for (; p != pe; p++) { + ch = *p; + if (ch == CR || ch == LF) { + --p; + break; + } + if (!lenient && !IS_HEADER_CHAR(ch)) { + SET_ERRNO(HPE_INVALID_HEADER_TOKEN); + goto error; + } + } + if (p == data + len) + --p; + break; + } + + case h_connection: + case h_transfer_encoding: + assert(0 && "Shouldn't get here."); + break; + + case h_content_length: + if (ch == ' ') break; + h_state = h_content_length_num; + /* fall through */ + + case h_content_length_num: + { + uint64_t t; + + if (ch == ' ') { + h_state = h_content_length_ws; + break; + } + + if (UNLIKELY(!IS_NUM(ch))) { + SET_ERRNO(HPE_INVALID_CONTENT_LENGTH); + parser->header_state = h_state; + goto error; + } + + t = parser->content_length; + t *= 10; + t += ch - '0'; + + /* Overflow? Test against a conservative limit for simplicity. 
*/ + if (UNLIKELY((ULLONG_MAX - 10) / 10 < parser->content_length)) { + SET_ERRNO(HPE_INVALID_CONTENT_LENGTH); + parser->header_state = h_state; + goto error; + } + + parser->content_length = t; + break; + } + + case h_content_length_ws: + if (ch == ' ') break; + SET_ERRNO(HPE_INVALID_CONTENT_LENGTH); + parser->header_state = h_state; + goto error; + + /* Transfer-Encoding: chunked */ + case h_matching_transfer_encoding_chunked: + parser->index++; + if (parser->index > sizeof(CHUNKED)-1 + || c != CHUNKED[parser->index]) { + h_state = h_general; + } else if (parser->index == sizeof(CHUNKED)-2) { + h_state = h_transfer_encoding_chunked; + } + break; + + case h_matching_connection_token_start: + /* looking for 'Connection: keep-alive' */ + if (c == 'k') { + h_state = h_matching_connection_keep_alive; + /* looking for 'Connection: close' */ + } else if (c == 'c') { + h_state = h_matching_connection_close; + } else if (c == 'u') { + h_state = h_matching_connection_upgrade; + } else if (STRICT_TOKEN(c)) { + h_state = h_matching_connection_token; + } else if (c == ' ' || c == '\t') { + /* Skip lws */ + } else { + h_state = h_general; + } + break; + + /* looking for 'Connection: keep-alive' */ + case h_matching_connection_keep_alive: + parser->index++; + if (parser->index > sizeof(KEEP_ALIVE)-1 + || c != KEEP_ALIVE[parser->index]) { + h_state = h_matching_connection_token; + } else if (parser->index == sizeof(KEEP_ALIVE)-2) { + h_state = h_connection_keep_alive; + } + break; + + /* looking for 'Connection: close' */ + case h_matching_connection_close: + parser->index++; + if (parser->index > sizeof(CLOSE)-1 || c != CLOSE[parser->index]) { + h_state = h_matching_connection_token; + } else if (parser->index == sizeof(CLOSE)-2) { + h_state = h_connection_close; + } + break; + + /* looking for 'Connection: upgrade' */ + case h_matching_connection_upgrade: + parser->index++; + if (parser->index > sizeof(UPGRADE) - 1 || + c != UPGRADE[parser->index]) { + h_state = h_matching_connection_token; + } else if (parser->index == sizeof(UPGRADE)-2) { + h_state = h_connection_upgrade; + } + break; + + case h_matching_connection_token: + if (ch == ',') { + h_state = h_matching_connection_token_start; + parser->index = 0; + } + break; + + case h_transfer_encoding_chunked: + if (ch != ' ') h_state = h_general; + break; + + case h_connection_keep_alive: + case h_connection_close: + case h_connection_upgrade: + if (ch == ',') { + if (h_state == h_connection_keep_alive) { + parser->flags |= F_CONNECTION_KEEP_ALIVE; + } else if (h_state == h_connection_close) { + parser->flags |= F_CONNECTION_CLOSE; + } else if (h_state == h_connection_upgrade) { + parser->flags |= F_CONNECTION_UPGRADE; + } + h_state = h_matching_connection_token_start; + parser->index = 0; + } else if (ch != ' ') { + h_state = h_matching_connection_token; + } + break; + + default: + UPDATE_STATE(s_header_value); + h_state = h_general; + break; + } + } + parser->header_state = h_state; + + if (p == data + len) + --p; + + COUNT_HEADER_SIZE(p - start); + break; + } + + case s_header_almost_done: + { + if (UNLIKELY(ch != LF)) { + SET_ERRNO(HPE_LF_EXPECTED); + goto error; + } + + UPDATE_STATE(s_header_value_lws); + break; + } + + case s_header_value_lws: + { + if (ch == ' ' || ch == '\t') { + if (parser->header_state == h_content_length_num) { + /* treat obsolete line folding as space */ + parser->header_state = h_content_length_ws; + } + UPDATE_STATE(s_header_value_start); + REEXECUTE(); + } + + /* finished the header */ + switch (parser->header_state) { 
+ case h_connection_keep_alive: + parser->flags |= F_CONNECTION_KEEP_ALIVE; + break; + case h_connection_close: + parser->flags |= F_CONNECTION_CLOSE; + break; + case h_transfer_encoding_chunked: + parser->flags |= F_CHUNKED; + break; + case h_connection_upgrade: + parser->flags |= F_CONNECTION_UPGRADE; + break; + default: + break; + } + + UPDATE_STATE(s_header_field_start); + REEXECUTE(); + } + + case s_header_value_discard_ws_almost_done: + { + STRICT_CHECK(ch != LF); + UPDATE_STATE(s_header_value_discard_lws); + break; + } + + case s_header_value_discard_lws: + { + if (ch == ' ' || ch == '\t') { + UPDATE_STATE(s_header_value_discard_ws); + break; + } else { + switch (parser->header_state) { + case h_connection_keep_alive: + parser->flags |= F_CONNECTION_KEEP_ALIVE; + break; + case h_connection_close: + parser->flags |= F_CONNECTION_CLOSE; + break; + case h_connection_upgrade: + parser->flags |= F_CONNECTION_UPGRADE; + break; + case h_transfer_encoding_chunked: + parser->flags |= F_CHUNKED; + break; + case h_content_length: + /* do not allow empty content length */ + SET_ERRNO(HPE_INVALID_CONTENT_LENGTH); + goto error; + break; + default: + break; + } + + /* header value was empty */ + MARK(header_value); + UPDATE_STATE(s_header_field_start); + CALLBACK_DATA_NOADVANCE(header_value); + REEXECUTE(); + } + } + + case s_headers_almost_done: + { + STRICT_CHECK(ch != LF); + + if (parser->flags & F_TRAILING) { + /* End of a chunked request */ + UPDATE_STATE(s_message_done); + CALLBACK_NOTIFY_NOADVANCE(chunk_complete); + REEXECUTE(); + } + + /* Cannot use chunked encoding and a content-length header together + per the HTTP specification. */ + if ((parser->flags & F_CHUNKED) && + (parser->flags & F_CONTENTLENGTH)) { + SET_ERRNO(HPE_UNEXPECTED_CONTENT_LENGTH); + goto error; + } + + UPDATE_STATE(s_headers_done); + + /* Set this here so that on_headers_complete() callbacks can see it */ + if ((parser->flags & F_UPGRADE) && + (parser->flags & F_CONNECTION_UPGRADE)) { + /* For responses, "Upgrade: foo" and "Connection: upgrade" are + * mandatory only when it is a 101 Switching Protocols response, + * otherwise it is purely informational, to announce support. + */ + parser->upgrade = + (parser->type == HTTP_REQUEST || parser->status_code == 101); + } else { + parser->upgrade = (parser->method == HTTP_CONNECT); + } + + /* Here we call the headers_complete callback. This is somewhat + * different than other callbacks because if the user returns 1, we + * will interpret that as saying that this message has no body. This + * is needed for the annoying case of recieving a response to a HEAD + * request. + * + * We'd like to use CALLBACK_NOTIFY_NOADVANCE() here but we cannot, so + * we have to simulate it by handling a change in errno below. 
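The contract described above is easiest to see from the application side. A minimal sketch of a response-side on_headers_complete callback, assuming the application tracks whether the request it sent was a HEAD (the conn struct and sent_head_request flag are illustrative, not part of this patch):

#include "http_parser.h"

struct conn {
  int sent_head_request;   /* set by the application when it issues a HEAD */
};

static int on_headers_complete_cb(http_parser *parser) {
  struct conn *c = (struct conn *)parser->data;
  /* Returning 1 tells the parser this message has no body, which is the
   * HEAD-response case described above; returning 0 lets Content-Length
   * and Transfer-Encoding drive body parsing as usual. */
  return c->sent_head_request ? 1 : 0;
}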
+ */ + if (settings->on_headers_complete) { + switch (settings->on_headers_complete(parser)) { + case 0: + break; + + case 2: + parser->upgrade = 1; + + /* fall through */ + case 1: + parser->flags |= F_SKIPBODY; + break; + + default: + SET_ERRNO(HPE_CB_headers_complete); + RETURN(p - data); /* Error */ + } + } + + if (HTTP_PARSER_ERRNO(parser) != HPE_OK) { + RETURN(p - data); + } + + REEXECUTE(); + } + + case s_headers_done: + { + int hasBody; + STRICT_CHECK(ch != LF); + + parser->nread = 0; + nread = 0; + + hasBody = parser->flags & F_CHUNKED || + (parser->content_length > 0 && parser->content_length != ULLONG_MAX); + if (parser->upgrade && (parser->method == HTTP_CONNECT || + (parser->flags & F_SKIPBODY) || !hasBody)) { + /* Exit, the rest of the message is in a different protocol. */ + UPDATE_STATE(NEW_MESSAGE()); + CALLBACK_NOTIFY(message_complete); + RETURN((p - data) + 1); + } + + if (parser->flags & F_SKIPBODY) { + UPDATE_STATE(NEW_MESSAGE()); + CALLBACK_NOTIFY(message_complete); + } else if (parser->flags & F_CHUNKED) { + /* chunked encoding - ignore Content-Length header */ + UPDATE_STATE(s_chunk_size_start); + } else { + if (parser->content_length == 0) { + /* Content-Length header given but zero: Content-Length: 0\r\n */ + UPDATE_STATE(NEW_MESSAGE()); + CALLBACK_NOTIFY(message_complete); + } else if (parser->content_length != ULLONG_MAX) { + /* Content-Length header given and non-zero */ + UPDATE_STATE(s_body_identity); + } else { + if (!http_message_needs_eof(parser)) { + /* Assume content-length 0 - read the next */ + UPDATE_STATE(NEW_MESSAGE()); + CALLBACK_NOTIFY(message_complete); + } else { + /* Read body until EOF */ + UPDATE_STATE(s_body_identity_eof); + } + } + } + + break; + } + + case s_body_identity: + { + uint64_t to_read = MIN(parser->content_length, + (uint64_t) ((data + len) - p)); + + assert(parser->content_length != 0 + && parser->content_length != ULLONG_MAX); + + /* The difference between advancing content_length and p is because + * the latter will automaticaly advance on the next loop iteration. + * Further, if content_length ends up at 0, we want to see the last + * byte again for our message complete callback. + */ + MARK(body); + parser->content_length -= to_read; + p += to_read - 1; + + if (parser->content_length == 0) { + UPDATE_STATE(s_message_done); + + /* Mimic CALLBACK_DATA_NOADVANCE() but with one extra byte. + * + * The alternative to doing this is to wait for the next byte to + * trigger the data callback, just as in every other case. The + * problem with this is that this makes it difficult for the test + * harness to distinguish between complete-on-EOF and + * complete-on-length. It's not clear that this distinction is + * important for applications, but let's keep it for now. + */ + CALLBACK_DATA_(body, p - body_mark + 1, p - data); + REEXECUTE(); + } + + break; + } + + /* read until EOF */ + case s_body_identity_eof: + MARK(body); + p = data + len - 1; + + break; + + case s_message_done: + UPDATE_STATE(NEW_MESSAGE()); + CALLBACK_NOTIFY(message_complete); + if (parser->upgrade) { + /* Exit, the rest of the message is in a different protocol. 
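From the caller's point of view, this early return is why parser->upgrade has to be checked alongside the byte count returned by http_parser_execute(). A sketch of that check, where feed_socket_data() and handle_upgraded_protocol() are assumed application helpers rather than part of the library:

#include <stdio.h>
#include "http_parser.h"

void handle_upgraded_protocol(const char *data, size_t len); /* hypothetical */

static void feed_socket_data(http_parser *parser,
                             const http_parser_settings *settings,
                             const char *buf, size_t nread) {
  size_t nparsed = http_parser_execute(parser, settings, buf, nread);

  if (parser->upgrade) {
    /* Parsing stopped after the HTTP message; the remaining bytes belong
     * to the upgraded protocol and must be handled outside the parser. */
    handle_upgraded_protocol(buf + nparsed, nread - nparsed);
  } else if (nparsed != nread) {
    /* A short count without an upgrade indicates a parse error. */
    fprintf(stderr, "parse error: %s\n",
            http_errno_description(HTTP_PARSER_ERRNO(parser)));
  }
}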
*/ + RETURN((p - data) + 1); + } + break; + + case s_chunk_size_start: + { + assert(nread == 1); + assert(parser->flags & F_CHUNKED); + + unhex_val = unhex[(unsigned char)ch]; + if (UNLIKELY(unhex_val == -1)) { + SET_ERRNO(HPE_INVALID_CHUNK_SIZE); + goto error; + } + + parser->content_length = unhex_val; + UPDATE_STATE(s_chunk_size); + break; + } + + case s_chunk_size: + { + uint64_t t; + + assert(parser->flags & F_CHUNKED); + + if (ch == CR) { + UPDATE_STATE(s_chunk_size_almost_done); + break; + } + + unhex_val = unhex[(unsigned char)ch]; + + if (unhex_val == -1) { + if (ch == ';' || ch == ' ') { + UPDATE_STATE(s_chunk_parameters); + break; + } + + SET_ERRNO(HPE_INVALID_CHUNK_SIZE); + goto error; + } + + t = parser->content_length; + t *= 16; + t += unhex_val; + + /* Overflow? Test against a conservative limit for simplicity. */ + if (UNLIKELY((ULLONG_MAX - 16) / 16 < parser->content_length)) { + SET_ERRNO(HPE_INVALID_CONTENT_LENGTH); + goto error; + } + + parser->content_length = t; + break; + } + + case s_chunk_parameters: + { + assert(parser->flags & F_CHUNKED); + /* just ignore this shit. TODO check for overflow */ + if (ch == CR) { + UPDATE_STATE(s_chunk_size_almost_done); + break; + } + break; + } + + case s_chunk_size_almost_done: + { + assert(parser->flags & F_CHUNKED); + STRICT_CHECK(ch != LF); + + parser->nread = 0; + nread = 0; + + if (parser->content_length == 0) { + parser->flags |= F_TRAILING; + UPDATE_STATE(s_header_field_start); + } else { + UPDATE_STATE(s_chunk_data); + } + CALLBACK_NOTIFY(chunk_header); + break; + } + + case s_chunk_data: + { + uint64_t to_read = MIN(parser->content_length, + (uint64_t) ((data + len) - p)); + + assert(parser->flags & F_CHUNKED); + assert(parser->content_length != 0 + && parser->content_length != ULLONG_MAX); + + /* See the explanation in s_body_identity for why the content + * length and data pointers are managed this way. + */ + MARK(body); + parser->content_length -= to_read; + p += to_read - 1; + + if (parser->content_length == 0) { + UPDATE_STATE(s_chunk_data_almost_done); + } + + break; + } + + case s_chunk_data_almost_done: + assert(parser->flags & F_CHUNKED); + assert(parser->content_length == 0); + STRICT_CHECK(ch != CR); + UPDATE_STATE(s_chunk_data_done); + CALLBACK_DATA(body); + break; + + case s_chunk_data_done: + assert(parser->flags & F_CHUNKED); + STRICT_CHECK(ch != LF); + parser->nread = 0; + nread = 0; + UPDATE_STATE(s_chunk_size_start); + CALLBACK_NOTIFY(chunk_complete); + break; + + default: + assert(0 && "unhandled state"); + SET_ERRNO(HPE_INVALID_INTERNAL_STATE); + goto error; + } + } + + /* Run callbacks for any marks that we have leftover after we ran out of + * bytes. There should be at most one of these set, so it's OK to invoke + * them in series (unset marks will not result in callbacks). + * + * We use the NOADVANCE() variety of callbacks here because 'p' has already + * overflowed 'data' and this allows us to correct for the off-by-one that + * we'd otherwise have (since CALLBACK_DATA() is meant to be run with a 'p' + * value that's in-bounds). + */ + + assert(((header_field_mark ? 1 : 0) + + (header_value_mark ? 1 : 0) + + (url_mark ? 1 : 0) + + (body_mark ? 1 : 0) + + (status_mark ? 
1 : 0)) <= 1); + + CALLBACK_DATA_NOADVANCE(header_field); + CALLBACK_DATA_NOADVANCE(header_value); + CALLBACK_DATA_NOADVANCE(url); + CALLBACK_DATA_NOADVANCE(body); + CALLBACK_DATA_NOADVANCE(status); + + RETURN(len); + +error: + if (HTTP_PARSER_ERRNO(parser) == HPE_OK) { + SET_ERRNO(HPE_UNKNOWN); + } + + RETURN(p - data); +} + + +/* Does the parser need to see an EOF to find the end of the message? */ +int +http_message_needs_eof (const http_parser *parser) +{ + if (parser->type == HTTP_REQUEST) { + return 0; + } + + /* See RFC 2616 section 4.4 */ + if (parser->status_code / 100 == 1 || /* 1xx e.g. Continue */ + parser->status_code == 204 || /* No Content */ + parser->status_code == 304 || /* Not Modified */ + parser->flags & F_SKIPBODY) { /* response to a HEAD request */ + return 0; + } + + if ((parser->flags & F_CHUNKED) || parser->content_length != ULLONG_MAX) { + return 0; + } + + return 1; +} + + +int +http_should_keep_alive (const http_parser *parser) +{ + if (parser->http_major > 0 && parser->http_minor > 0) { + /* HTTP/1.1 */ + if (parser->flags & F_CONNECTION_CLOSE) { + return 0; + } + } else { + /* HTTP/1.0 or earlier */ + if (!(parser->flags & F_CONNECTION_KEEP_ALIVE)) { + return 0; + } + } + + return !http_message_needs_eof(parser); +} + + +const char * +http_method_str (enum http_method m) +{ + return ELEM_AT(method_strings, m, ""); +} + +const char * +http_status_str (enum http_status s) +{ + switch (s) { +#define XX(num, name, string) case HTTP_STATUS_##name: return #string; + HTTP_STATUS_MAP(XX) +#undef XX + default: return ""; + } +} + +void +http_parser_init (http_parser *parser, enum http_parser_type t) +{ + void *data = parser->data; /* preserve application data */ + memset(parser, 0, sizeof(*parser)); + parser->data = data; + parser->type = t; + parser->state = (t == HTTP_REQUEST ? s_start_req : (t == HTTP_RESPONSE ? s_start_res : s_start_req_or_res)); + parser->http_errno = HPE_OK; +} + +void +http_parser_settings_init(http_parser_settings *settings) +{ + memset(settings, 0, sizeof(*settings)); +} + +const char * +http_errno_name(enum http_errno err) { + assert(((size_t) err) < ARRAY_SIZE(http_strerror_tab)); + return http_strerror_tab[err].name; +} + +const char * +http_errno_description(enum http_errno err) { + assert(((size_t) err) < ARRAY_SIZE(http_strerror_tab)); + return http_strerror_tab[err].description; +} + +static enum http_host_state +http_parse_host_char(enum http_host_state s, const char ch) { + switch(s) { + case s_http_userinfo: + case s_http_userinfo_start: + if (ch == '@') { + return s_http_host_start; + } + + if (IS_USERINFO_CHAR(ch)) { + return s_http_userinfo; + } + break; + + case s_http_host_start: + if (ch == '[') { + return s_http_host_v6_start; + } + + if (IS_HOST_CHAR(ch)) { + return s_http_host; + } + + break; + + case s_http_host: + if (IS_HOST_CHAR(ch)) { + return s_http_host; + } + + /* fall through */ + case s_http_host_v6_end: + if (ch == ':') { + return s_http_host_port_start; + } + + break; + + case s_http_host_v6: + if (ch == ']') { + return s_http_host_v6_end; + } + + /* fall through */ + case s_http_host_v6_start: + if (IS_HEX(ch) || ch == ':' || ch == '.') { + return s_http_host_v6; + } + + if (s == s_http_host_v6 && ch == '%') { + return s_http_host_v6_zone_start; + } + break; + + case s_http_host_v6_zone: + if (ch == ']') { + return s_http_host_v6_end; + } + + /* fall through */ + case s_http_host_v6_zone_start: + /* RFC 6874 Zone ID consists of 1*( unreserved / pct-encoded) */ + if (IS_ALPHANUM(ch) || ch == '%' || ch == '.' 
|| ch == '-' || ch == '_' || + ch == '~') { + return s_http_host_v6_zone; + } + break; + + case s_http_host_port: + case s_http_host_port_start: + if (IS_NUM(ch)) { + return s_http_host_port; + } + + break; + + default: + break; + } + return s_http_host_dead; +} + +static int +http_parse_host(const char * buf, struct http_parser_url *u, int found_at) { + enum http_host_state s; + + const char *p; + size_t buflen = u->field_data[UF_HOST].off + u->field_data[UF_HOST].len; + + assert(u->field_set & (1 << UF_HOST)); + + u->field_data[UF_HOST].len = 0; + + s = found_at ? s_http_userinfo_start : s_http_host_start; + + for (p = buf + u->field_data[UF_HOST].off; p < buf + buflen; p++) { + enum http_host_state new_s = http_parse_host_char(s, *p); + + if (new_s == s_http_host_dead) { + return 1; + } + + switch(new_s) { + case s_http_host: + if (s != s_http_host) { + u->field_data[UF_HOST].off = (uint16_t)(p - buf); + } + u->field_data[UF_HOST].len++; + break; + + case s_http_host_v6: + if (s != s_http_host_v6) { + u->field_data[UF_HOST].off = (uint16_t)(p - buf); + } + u->field_data[UF_HOST].len++; + break; + + case s_http_host_v6_zone_start: + case s_http_host_v6_zone: + u->field_data[UF_HOST].len++; + break; + + case s_http_host_port: + if (s != s_http_host_port) { + u->field_data[UF_PORT].off = (uint16_t)(p - buf); + u->field_data[UF_PORT].len = 0; + u->field_set |= (1 << UF_PORT); + } + u->field_data[UF_PORT].len++; + break; + + case s_http_userinfo: + if (s != s_http_userinfo) { + u->field_data[UF_USERINFO].off = (uint16_t)(p - buf); + u->field_data[UF_USERINFO].len = 0; + u->field_set |= (1 << UF_USERINFO); + } + u->field_data[UF_USERINFO].len++; + break; + + default: + break; + } + s = new_s; + } + + /* Make sure we don't end somewhere unexpected */ + switch (s) { + case s_http_host_start: + case s_http_host_v6_start: + case s_http_host_v6: + case s_http_host_v6_zone_start: + case s_http_host_v6_zone: + case s_http_host_port_start: + case s_http_userinfo: + case s_http_userinfo_start: + return 1; + default: + break; + } + + return 0; +} + +void +http_parser_url_init(struct http_parser_url *u) { + memset(u, 0, sizeof(*u)); +} + +int +http_parser_parse_url(const char *buf, size_t buflen, int is_connect, + struct http_parser_url *u) +{ + enum state s; + const char *p; + enum http_parser_url_fields uf, old_uf; + int found_at = 0; + + if (buflen == 0) { + return 1; + } + + u->port = u->field_set = 0; + s = is_connect ? 
s_req_server_start : s_req_spaces_before_url; + old_uf = UF_MAX; + + for (p = buf; p < buf + buflen; p++) { + s = parse_url_char(s, *p); + + /* Figure out the next field that we're operating on */ + switch (s) { + case s_dead: + return 1; + + /* Skip delimeters */ + case s_req_schema_slash: + case s_req_schema_slash_slash: + case s_req_server_start: + case s_req_query_string_start: + case s_req_fragment_start: + continue; + + case s_req_schema: + uf = UF_SCHEMA; + break; + + case s_req_server_with_at: + found_at = 1; + + /* fall through */ + case s_req_server: + uf = UF_HOST; + break; + + case s_req_path: + uf = UF_PATH; + break; + + case s_req_query_string: + uf = UF_QUERY; + break; + + case s_req_fragment: + uf = UF_FRAGMENT; + break; + + default: + assert(0 && "Unexpected state"); + return 1; + } + + /* Nothing's changed; soldier on */ + if (uf == old_uf) { + u->field_data[uf].len++; + continue; + } + + u->field_data[uf].off = (uint16_t)(p - buf); + u->field_data[uf].len = 1; + + u->field_set |= (1 << uf); + old_uf = uf; + } + + /* host must be present if there is a schema */ + /* parsing http:///toto will fail */ + if ((u->field_set & (1 << UF_SCHEMA)) && + (u->field_set & (1 << UF_HOST)) == 0) { + return 1; + } + + if (u->field_set & (1 << UF_HOST)) { + if (http_parse_host(buf, u, found_at) != 0) { + return 1; + } + } + + /* CONNECT requests can only contain "hostname:port" */ + if (is_connect && u->field_set != ((1 << UF_HOST)|(1 << UF_PORT))) { + return 1; + } + + if (u->field_set & (1 << UF_PORT)) { + uint16_t off; + uint16_t len; + const char* p; + const char* end; + unsigned long v; + + off = u->field_data[UF_PORT].off; + len = u->field_data[UF_PORT].len; + end = buf + off + len; + + /* NOTE: The characters are already validated and are in the [0-9] range */ + assert(off + len <= buflen && "Port number overflow"); + v = 0; + for (p = buf + off; p < end; p++) { + v *= 10; + v += *p - '0'; + + /* Ports have a max value of 2^16 */ + if (v > 0xffff) { + return 1; + } + } + + u->port = (uint16_t) v; + } + + return 0; +} + +void +http_parser_pause(http_parser *parser, int paused) { + /* Users should only be pausing/unpausing a parser that is not in an error + * state. In non-debug builds, there's not much that we can do about this + * other than ignore it. + */ + if (HTTP_PARSER_ERRNO(parser) == HPE_OK || + HTTP_PARSER_ERRNO(parser) == HPE_PAUSED) { + uint32_t nread = parser->nread; /* used by the SET_ERRNO macro */ + SET_ERRNO((paused) ? HPE_PAUSED : HPE_OK); + } else { + assert(0 && "Attempting to pause parser in error state"); + } +} + +int +http_body_is_final(const struct http_parser *parser) { + return parser->state == s_message_done; +} + +unsigned long +http_parser_version(void) { + return HTTP_PARSER_VERSION_MAJOR * 0x10000 | + HTTP_PARSER_VERSION_MINOR * 0x00100 | + HTTP_PARSER_VERSION_PATCH * 0x00001; +} + +void +http_parser_set_max_header_size(uint32_t size) { + max_header_size = size; +} diff --git a/src/third_party/http-parser/http_parser.gyp b/src/third_party/http-parser/http_parser.gyp new file mode 100644 index 000000000..ef34ecaea --- /dev/null +++ b/src/third_party/http-parser/http_parser.gyp @@ -0,0 +1,111 @@ +# This file is used with the GYP meta build system. 
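As a usage note for the URL API implemented above (only public names from http_parser.h are used; the sample URL is arbitrary), a minimal sketch that pulls the host and port out of a parsed URL might look like this:

#include <stdio.h>
#include <string.h>
#include "http_parser.h"

int main(void) {
  const char *url = "http://example.com:8080/search?q=cassandra";
  struct http_parser_url u;

  http_parser_url_init(&u);
  if (http_parser_parse_url(url, strlen(url), 0, &u) != 0) {
    fprintf(stderr, "failed to parse URL\n");
    return 1;
  }

  if (u.field_set & (1 << UF_HOST)) {
    /* field_data holds offsets and lengths into the original buffer. */
    printf("host: %.*s\n", (int)u.field_data[UF_HOST].len,
           url + u.field_data[UF_HOST].off);
  }
  if (u.field_set & (1 << UF_PORT)) {
    printf("port: %u\n", u.port); /* already converted to a uint16_t */
  }
  return 0;
}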
+# http://code.google.com/p/gyp/ +# To build try this: +# svn co http://gyp.googlecode.com/svn/trunk gyp +# ./gyp/gyp -f make --depth=`pwd` http_parser.gyp +# ./out/Debug/test +{ + 'target_defaults': { + 'default_configuration': 'Debug', + 'configurations': { + # TODO: hoist these out and put them somewhere common, because + # RuntimeLibrary MUST MATCH across the entire project + 'Debug': { + 'defines': [ 'DEBUG', '_DEBUG' ], + 'cflags': [ '-Wall', '-Wextra', '-O0', '-g', '-ftrapv' ], + 'msvs_settings': { + 'VCCLCompilerTool': { + 'RuntimeLibrary': 1, # static debug + }, + }, + }, + 'Release': { + 'defines': [ 'NDEBUG' ], + 'cflags': [ '-Wall', '-Wextra', '-O3' ], + 'msvs_settings': { + 'VCCLCompilerTool': { + 'RuntimeLibrary': 0, # static release + }, + }, + } + }, + 'msvs_settings': { + 'VCCLCompilerTool': { + }, + 'VCLibrarianTool': { + }, + 'VCLinkerTool': { + 'GenerateDebugInformation': 'true', + }, + }, + 'conditions': [ + ['OS == "win"', { + 'defines': [ + 'WIN32' + ], + }] + ], + }, + + 'targets': [ + { + 'target_name': 'http_parser', + 'type': 'static_library', + 'include_dirs': [ '.' ], + 'direct_dependent_settings': { + 'defines': [ 'HTTP_PARSER_STRICT=0' ], + 'include_dirs': [ '.' ], + }, + 'defines': [ 'HTTP_PARSER_STRICT=0' ], + 'sources': [ './http_parser.c', ], + 'conditions': [ + ['OS=="win"', { + 'msvs_settings': { + 'VCCLCompilerTool': { + # Compile as C++. http_parser.c is actually C99, but C++ is + # close enough in this case. + 'CompileAs': 2, + }, + }, + }] + ], + }, + + { + 'target_name': 'http_parser_strict', + 'type': 'static_library', + 'include_dirs': [ '.' ], + 'direct_dependent_settings': { + 'defines': [ 'HTTP_PARSER_STRICT=1' ], + 'include_dirs': [ '.' ], + }, + 'defines': [ 'HTTP_PARSER_STRICT=1' ], + 'sources': [ './http_parser.c', ], + 'conditions': [ + ['OS=="win"', { + 'msvs_settings': { + 'VCCLCompilerTool': { + # Compile as C++. http_parser.c is actually C99, but C++ is + # close enough in this case. + 'CompileAs': 2, + }, + }, + }] + ], + }, + + { + 'target_name': 'test-nonstrict', + 'type': 'executable', + 'dependencies': [ 'http_parser' ], + 'sources': [ 'test.c' ] + }, + + { + 'target_name': 'test-strict', + 'type': 'executable', + 'dependencies': [ 'http_parser_strict' ], + 'sources': [ 'test.c' ] + } + ] +} diff --git a/src/third_party/http-parser/http_parser.h b/src/third_party/http-parser/http_parser.h new file mode 100644 index 000000000..657d011f7 --- /dev/null +++ b/src/third_party/http-parser/http_parser.h @@ -0,0 +1,448 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ +#ifndef http_parser_h +#define http_parser_h +#ifdef __cplusplus +extern "C" { +#endif + +/* Also update SONAME in the Makefile whenever you change these. */ +#define HTTP_PARSER_VERSION_MAJOR 2 +#define HTTP_PARSER_VERSION_MINOR 9 +#define HTTP_PARSER_VERSION_PATCH 2 + +#include +#if defined(_WIN32) && !defined(__MINGW32__) && \ + (!defined(_MSC_VER) || _MSC_VER<1600) && !defined(__WINE__) +#include +typedef __int8 int8_t; +typedef unsigned __int8 uint8_t; +typedef __int16 int16_t; +typedef unsigned __int16 uint16_t; +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +#else +#include +#endif + +/* Compile with -DHTTP_PARSER_STRICT=0 to make less checks, but run + * faster + */ +#ifndef HTTP_PARSER_STRICT +# define HTTP_PARSER_STRICT 1 +#endif + +/* Maximium header size allowed. If the macro is not defined + * before including this header then the default is used. To + * change the maximum header size, define the macro in the build + * environment (e.g. -DHTTP_MAX_HEADER_SIZE=). To remove + * the effective limit on the size of the header, define the macro + * to a very large number (e.g. -DHTTP_MAX_HEADER_SIZE=0x7fffffff) + */ +#ifndef HTTP_MAX_HEADER_SIZE +# define HTTP_MAX_HEADER_SIZE (80*1024) +#endif + +typedef struct http_parser http_parser; +typedef struct http_parser_settings http_parser_settings; + + +/* Callbacks should return non-zero to indicate an error. The parser will + * then halt execution. + * + * The one exception is on_headers_complete. In a HTTP_RESPONSE parser + * returning '1' from on_headers_complete will tell the parser that it + * should not expect a body. This is used when receiving a response to a + * HEAD request which may contain 'Content-Length' or 'Transfer-Encoding: + * chunked' headers that indicate the presence of a body. + * + * Returning `2` from on_headers_complete will tell parser that it should not + * expect neither a body nor any futher responses on this connection. This is + * useful for handling responses to a CONNECT request which may not contain + * `Upgrade` or `Connection: upgrade` headers. + * + * http_data_cb does not return data chunks. It will be called arbitrarily + * many times for each string. E.G. you might get 10 callbacks for "on_url" + * each providing just a few characters more data. 
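Because data callbacks can fire several times for a single logical value, applications typically accumulate rather than overwrite. A short sketch of that pattern, assuming an application-defined request_info struct with a fixed 1 KB URL buffer (both are illustrative, not part of the library):

#include <string.h>
#include "http_parser.h"

struct request_info {
  char url[1024];
  size_t url_len;
};

/* on_url may be called many times for one URL, so append each piece. */
static int on_url_cb(http_parser *parser, const char *at, size_t length) {
  struct request_info *info = (struct request_info *)parser->data;
  if (info->url_len + length >= sizeof(info->url))
    return 1;               /* non-zero aborts parsing with HPE_CB_url */
  memcpy(info->url + info->url_len, at, length);
  info->url_len += length;
  info->url[info->url_len] = '\0';
  return 0;
}

static void setup_parser(http_parser *parser, http_parser_settings *settings,
                         struct request_info *info) {
  http_parser_settings_init(settings);  /* unused callbacks stay NULL */
  settings->on_url = on_url_cb;
  http_parser_init(parser, HTTP_REQUEST);
  info->url_len = 0;
  parser->data = info;    /* available as parser->data in every callback */
}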
+ */ +typedef int (*http_data_cb) (http_parser*, const char *at, size_t length); +typedef int (*http_cb) (http_parser*); + + +/* Status Codes */ +#define HTTP_STATUS_MAP(XX) \ + XX(100, CONTINUE, Continue) \ + XX(101, SWITCHING_PROTOCOLS, Switching Protocols) \ + XX(102, PROCESSING, Processing) \ + XX(200, OK, OK) \ + XX(201, CREATED, Created) \ + XX(202, ACCEPTED, Accepted) \ + XX(203, NON_AUTHORITATIVE_INFORMATION, Non-Authoritative Information) \ + XX(204, NO_CONTENT, No Content) \ + XX(205, RESET_CONTENT, Reset Content) \ + XX(206, PARTIAL_CONTENT, Partial Content) \ + XX(207, MULTI_STATUS, Multi-Status) \ + XX(208, ALREADY_REPORTED, Already Reported) \ + XX(226, IM_USED, IM Used) \ + XX(300, MULTIPLE_CHOICES, Multiple Choices) \ + XX(301, MOVED_PERMANENTLY, Moved Permanently) \ + XX(302, FOUND, Found) \ + XX(303, SEE_OTHER, See Other) \ + XX(304, NOT_MODIFIED, Not Modified) \ + XX(305, USE_PROXY, Use Proxy) \ + XX(307, TEMPORARY_REDIRECT, Temporary Redirect) \ + XX(308, PERMANENT_REDIRECT, Permanent Redirect) \ + XX(400, BAD_REQUEST, Bad Request) \ + XX(401, UNAUTHORIZED, Unauthorized) \ + XX(402, PAYMENT_REQUIRED, Payment Required) \ + XX(403, FORBIDDEN, Forbidden) \ + XX(404, NOT_FOUND, Not Found) \ + XX(405, METHOD_NOT_ALLOWED, Method Not Allowed) \ + XX(406, NOT_ACCEPTABLE, Not Acceptable) \ + XX(407, PROXY_AUTHENTICATION_REQUIRED, Proxy Authentication Required) \ + XX(408, REQUEST_TIMEOUT, Request Timeout) \ + XX(409, CONFLICT, Conflict) \ + XX(410, GONE, Gone) \ + XX(411, LENGTH_REQUIRED, Length Required) \ + XX(412, PRECONDITION_FAILED, Precondition Failed) \ + XX(413, PAYLOAD_TOO_LARGE, Payload Too Large) \ + XX(414, URI_TOO_LONG, URI Too Long) \ + XX(415, UNSUPPORTED_MEDIA_TYPE, Unsupported Media Type) \ + XX(416, RANGE_NOT_SATISFIABLE, Range Not Satisfiable) \ + XX(417, EXPECTATION_FAILED, Expectation Failed) \ + XX(421, MISDIRECTED_REQUEST, Misdirected Request) \ + XX(422, UNPROCESSABLE_ENTITY, Unprocessable Entity) \ + XX(423, LOCKED, Locked) \ + XX(424, FAILED_DEPENDENCY, Failed Dependency) \ + XX(426, UPGRADE_REQUIRED, Upgrade Required) \ + XX(428, PRECONDITION_REQUIRED, Precondition Required) \ + XX(429, TOO_MANY_REQUESTS, Too Many Requests) \ + XX(431, REQUEST_HEADER_FIELDS_TOO_LARGE, Request Header Fields Too Large) \ + XX(451, UNAVAILABLE_FOR_LEGAL_REASONS, Unavailable For Legal Reasons) \ + XX(500, INTERNAL_SERVER_ERROR, Internal Server Error) \ + XX(501, NOT_IMPLEMENTED, Not Implemented) \ + XX(502, BAD_GATEWAY, Bad Gateway) \ + XX(503, SERVICE_UNAVAILABLE, Service Unavailable) \ + XX(504, GATEWAY_TIMEOUT, Gateway Timeout) \ + XX(505, HTTP_VERSION_NOT_SUPPORTED, HTTP Version Not Supported) \ + XX(506, VARIANT_ALSO_NEGOTIATES, Variant Also Negotiates) \ + XX(507, INSUFFICIENT_STORAGE, Insufficient Storage) \ + XX(508, LOOP_DETECTED, Loop Detected) \ + XX(510, NOT_EXTENDED, Not Extended) \ + XX(511, NETWORK_AUTHENTICATION_REQUIRED, Network Authentication Required) \ + +enum http_status + { +#define XX(num, name, string) HTTP_STATUS_##name = num, + HTTP_STATUS_MAP(XX) +#undef XX + /* @cond IGNORE */ + HTTP_STATUS_MAP_LAST_ENTRY + /* @endcond */ + }; + + +/* Request Methods */ +#define HTTP_METHOD_MAP(XX) \ + XX(0, DELETE, DELETE) \ + XX(1, GET, GET) \ + XX(2, HEAD, HEAD) \ + XX(3, POST, POST) \ + XX(4, PUT, PUT) \ + /* pathological */ \ + XX(5, CONNECT, CONNECT) \ + XX(6, OPTIONS, OPTIONS) \ + XX(7, TRACE, TRACE) \ + /* WebDAV */ \ + XX(8, COPY, COPY) \ + XX(9, LOCK, LOCK) \ + XX(10, MKCOL, MKCOL) \ + XX(11, MOVE, MOVE) \ + XX(12, PROPFIND, PROPFIND) \ + XX(13, 
PROPPATCH, PROPPATCH) \ + XX(14, SEARCH, SEARCH) \ + XX(15, UNLOCK, UNLOCK) \ + XX(16, BIND, BIND) \ + XX(17, REBIND, REBIND) \ + XX(18, UNBIND, UNBIND) \ + XX(19, ACL, ACL) \ + /* subversion */ \ + XX(20, REPORT, REPORT) \ + XX(21, MKACTIVITY, MKACTIVITY) \ + XX(22, CHECKOUT, CHECKOUT) \ + XX(23, MERGE, MERGE) \ + /* upnp */ \ + XX(24, MSEARCH, M-SEARCH) \ + XX(25, NOTIFY, NOTIFY) \ + XX(26, SUBSCRIBE, SUBSCRIBE) \ + XX(27, UNSUBSCRIBE, UNSUBSCRIBE) \ + /* RFC-5789 */ \ + XX(28, PATCH, PATCH) \ + XX(29, PURGE, PURGE) \ + /* CalDAV */ \ + XX(30, MKCALENDAR, MKCALENDAR) \ + /* RFC-2068, section 19.6.1.2 */ \ + XX(31, LINK, LINK) \ + XX(32, UNLINK, UNLINK) \ + /* icecast */ \ + XX(33, SOURCE, SOURCE) \ + +enum http_method + { +#define XX(num, name, string) HTTP_##name = num, + HTTP_METHOD_MAP(XX) +#undef XX + /* @cond IGNORE */ + HTTP_METHOD_MAP_LAST_ENTRY + /* @endcond */ + }; + + +enum http_parser_type { HTTP_REQUEST, HTTP_RESPONSE, HTTP_BOTH }; + + +/* Flag values for http_parser.flags field */ +enum flags + { F_CHUNKED = 1 << 0 + , F_CONNECTION_KEEP_ALIVE = 1 << 1 + , F_CONNECTION_CLOSE = 1 << 2 + , F_CONNECTION_UPGRADE = 1 << 3 + , F_TRAILING = 1 << 4 + , F_UPGRADE = 1 << 5 + , F_SKIPBODY = 1 << 6 + , F_CONTENTLENGTH = 1 << 7 + }; + + +/* Map for errno-related constants + * + * The provided argument should be a macro that takes 2 arguments. + */ +#define HTTP_ERRNO_MAP(XX) \ + /* No error */ \ + XX(OK, "success") \ + \ + /* Callback-related errors */ \ + XX(CB_message_begin, "the on_message_begin callback failed") \ + XX(CB_url, "the on_url callback failed") \ + XX(CB_header_field, "the on_header_field callback failed") \ + XX(CB_header_value, "the on_header_value callback failed") \ + XX(CB_headers_complete, "the on_headers_complete callback failed") \ + XX(CB_body, "the on_body callback failed") \ + XX(CB_message_complete, "the on_message_complete callback failed") \ + XX(CB_status, "the on_status callback failed") \ + XX(CB_chunk_header, "the on_chunk_header callback failed") \ + XX(CB_chunk_complete, "the on_chunk_complete callback failed") \ + \ + /* Parsing-related errors */ \ + XX(INVALID_EOF_STATE, "stream ended at an unexpected time") \ + XX(HEADER_OVERFLOW, \ + "too many header bytes seen; overflow detected") \ + XX(CLOSED_CONNECTION, \ + "data received after completed connection: close message") \ + XX(INVALID_VERSION, "invalid HTTP version") \ + XX(INVALID_STATUS, "invalid HTTP status code") \ + XX(INVALID_METHOD, "invalid HTTP method") \ + XX(INVALID_URL, "invalid URL") \ + XX(INVALID_HOST, "invalid host") \ + XX(INVALID_PORT, "invalid port") \ + XX(INVALID_PATH, "invalid path") \ + XX(INVALID_QUERY_STRING, "invalid query string") \ + XX(INVALID_FRAGMENT, "invalid fragment") \ + XX(LF_EXPECTED, "LF character expected") \ + XX(INVALID_HEADER_TOKEN, "invalid character in header") \ + XX(INVALID_CONTENT_LENGTH, \ + "invalid character in content-length header") \ + XX(UNEXPECTED_CONTENT_LENGTH, \ + "unexpected content-length header") \ + XX(INVALID_CHUNK_SIZE, \ + "invalid character in chunk size header") \ + XX(INVALID_CONSTANT, "invalid constant string") \ + XX(INVALID_INTERNAL_STATE, "encountered unexpected internal state")\ + XX(STRICT, "strict mode assertion failed") \ + XX(PAUSED, "parser is paused") \ + XX(UNKNOWN, "an unknown error occurred") + + +/* Define HPE_* values for each errno value above */ +#define HTTP_ERRNO_GEN(n, s) HPE_##n, +enum http_errno { + HTTP_ERRNO_MAP(HTTP_ERRNO_GEN) + /* @cond IGNORE */ + HTTP_ERRNO_MAP_LAST_ENTRY + /* @endcond */ +}; +#undef 
HTTP_ERRNO_GEN + + +/* Get an http_errno value from an http_parser */ +#define HTTP_PARSER_ERRNO(p) ((enum http_errno) (p)->http_errno) + + +struct http_parser { + /** PRIVATE **/ + unsigned int type : 2; /* enum http_parser_type */ + unsigned int flags : 8; /* F_* values from 'flags' enum; semi-public */ + unsigned int state : 7; /* enum state from http_parser.c */ + unsigned int header_state : 7; /* enum header_state from http_parser.c */ + unsigned int index : 7; /* index into current matcher */ + unsigned int lenient_http_headers : 1; + + uint32_t nread; /* # bytes read in various scenarios */ + uint64_t content_length; /* # bytes in body (0 if no Content-Length header) */ + + /** READ-ONLY **/ + unsigned short http_major; + unsigned short http_minor; + unsigned int status_code : 16; /* responses only */ + unsigned int method : 8; /* requests only */ + unsigned int http_errno : 7; + + /* 1 = Upgrade header was present and the parser has exited because of that. + * 0 = No upgrade header present. + * Should be checked when http_parser_execute() returns in addition to + * error checking. + */ + unsigned int upgrade : 1; + + /** PUBLIC **/ + void *data; /* A pointer to get hook to the "connection" or "socket" object */ +}; + + +struct http_parser_settings { + http_cb on_message_begin; + http_data_cb on_url; + http_data_cb on_status; + http_data_cb on_header_field; + http_data_cb on_header_value; + http_cb on_headers_complete; + http_data_cb on_body; + http_cb on_message_complete; + /* When on_chunk_header is called, the current chunk length is stored + * in parser->content_length. + */ + http_cb on_chunk_header; + http_cb on_chunk_complete; +}; + + +enum http_parser_url_fields + { UF_SCHEMA = 0 + , UF_HOST = 1 + , UF_PORT = 2 + , UF_PATH = 3 + , UF_QUERY = 4 + , UF_FRAGMENT = 5 + , UF_USERINFO = 6 + , UF_MAX = 7 + }; + + +/* Result structure for http_parser_parse_url(). + * + * Callers should index into field_data[] with UF_* values iff field_set + * has the relevant (1 << UF_*) bit set. As a courtesy to clients (and + * because we probably have padding left over), we convert any port to + * a uint16_t. + */ +struct http_parser_url { + uint16_t field_set; /* Bitmask of (1 << UF_*) values */ + uint16_t port; /* Converted UF_PORT string */ + + struct { + uint16_t off; /* Offset into buffer in which field starts */ + uint16_t len; /* Length of run in buffer */ + } field_data[UF_MAX]; +}; + + +/* Returns the library version. Bits 16-23 contain the major version number, + * bits 8-15 the minor version number and bits 0-7 the patch level. + * Usage example: + * + * unsigned long version = http_parser_version(); + * unsigned major = (version >> 16) & 255; + * unsigned minor = (version >> 8) & 255; + * unsigned patch = version & 255; + * printf("http_parser v%u.%u.%u\n", major, minor, patch); + */ +unsigned long http_parser_version(void); + +void http_parser_init(http_parser *parser, enum http_parser_type type); + + +/* Initialize http_parser_settings members to 0 + */ +void http_parser_settings_init(http_parser_settings *settings); + + +/* Executes the parser. Returns number of parsed bytes. Sets + * `parser->http_errno` on error. */ +size_t http_parser_execute(http_parser *parser, + const http_parser_settings *settings, + const char *data, + size_t len); + + +/* If http_should_keep_alive() in the on_headers_complete or + * on_message_complete callback returns 0, then this should be + * the last message on the connection. + * If you are the server, respond with the "Connection: close" header. 
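A sketch of how an application might act on this from its on_message_complete callback; mark_connection_for_close() stands in for whatever connection teardown the application actually uses:

#include "http_parser.h"

void mark_connection_for_close(void *conn);  /* hypothetical application hook */

static int on_message_complete_cb(http_parser *parser) {
  if (!http_should_keep_alive(parser)) {
    /* Last message on this connection: a server would also send a
     * "Connection: close" header on its response before closing. */
    mark_connection_for_close(parser->data);
  }
  return 0;
}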
+ * If you are the client, close the connection. + */ +int http_should_keep_alive(const http_parser *parser); + +/* Returns a string version of the HTTP method. */ +const char *http_method_str(enum http_method m); + +/* Returns a string version of the HTTP status code. */ +const char *http_status_str(enum http_status s); + +/* Return a string name of the given error */ +const char *http_errno_name(enum http_errno err); + +/* Return a string description of the given error */ +const char *http_errno_description(enum http_errno err); + +/* Initialize all http_parser_url members to 0 */ +void http_parser_url_init(struct http_parser_url *u); + +/* Parse a URL; return nonzero on failure */ +int http_parser_parse_url(const char *buf, size_t buflen, + int is_connect, + struct http_parser_url *u); + +/* Pause or un-pause the parser; a nonzero value pauses */ +void http_parser_pause(http_parser *parser, int paused); + +/* Checks if this is the final chunk of the body. */ +int http_body_is_final(const http_parser *parser); + +/* Change the maximum header size provided at compile time. */ +void http_parser_set_max_header_size(uint32_t size); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/src/third_party/http-parser/test.c b/src/third_party/http-parser/test.c new file mode 100644 index 000000000..0140a18b7 --- /dev/null +++ b/src/third_party/http-parser/test.c @@ -0,0 +1,4518 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ +#include "http_parser.h" +#include +#include +#include +#include /* rand */ +#include +#include + +#if defined(__APPLE__) +# undef strlncpy +#endif /* defined(__APPLE__) */ + +#undef TRUE +#define TRUE 1 +#undef FALSE +#define FALSE 0 + +#define MAX_HEADERS 13 +#define MAX_ELEMENT_SIZE 2048 +#define MAX_CHUNKS 16 + +#define MIN(a,b) ((a) < (b) ? 
(a) : (b)) + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*x)) + +static http_parser parser; + +struct message { + const char *name; // for debugging purposes + const char *raw; + enum http_parser_type type; + enum http_method method; + int status_code; + char response_status[MAX_ELEMENT_SIZE]; + char request_path[MAX_ELEMENT_SIZE]; + char request_url[MAX_ELEMENT_SIZE]; + char fragment[MAX_ELEMENT_SIZE]; + char query_string[MAX_ELEMENT_SIZE]; + char body[MAX_ELEMENT_SIZE]; + size_t body_size; + const char *host; + const char *userinfo; + uint16_t port; + int num_headers; + enum { NONE=0, FIELD, VALUE } last_header_element; + char headers [MAX_HEADERS][2][MAX_ELEMENT_SIZE]; + int should_keep_alive; + + int num_chunks; + int num_chunks_complete; + int chunk_lengths[MAX_CHUNKS]; + + const char *upgrade; // upgraded body + + unsigned short http_major; + unsigned short http_minor; + + int message_begin_cb_called; + int headers_complete_cb_called; + int message_complete_cb_called; + int status_cb_called; + int message_complete_on_eof; + int body_is_final; +}; + +static int currently_parsing_eof; + +static struct message messages[5]; +static int num_messages; +static http_parser_settings *current_pause_parser; + +/* * R E Q U E S T S * */ +const struct message requests[] = +#define CURL_GET 0 +{ {.name= "curl get" + ,.type= HTTP_REQUEST + ,.raw= "GET /test HTTP/1.1\r\n" + "User-Agent: curl/7.18.0 (i486-pc-linux-gnu) libcurl/7.18.0 OpenSSL/0.9.8g zlib/1.2.3.3 libidn/1.1\r\n" + "Host: 0.0.0.0=5000\r\n" + "Accept: */*\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/test" + ,.request_url= "/test" + ,.num_headers= 3 + ,.headers= + { { "User-Agent", "curl/7.18.0 (i486-pc-linux-gnu) libcurl/7.18.0 OpenSSL/0.9.8g zlib/1.2.3.3 libidn/1.1" } + , { "Host", "0.0.0.0=5000" } + , { "Accept", "*/*" } + } + ,.body= "" + } + +#define FIREFOX_GET 1 +, {.name= "firefox get" + ,.type= HTTP_REQUEST + ,.raw= "GET /favicon.ico HTTP/1.1\r\n" + "Host: 0.0.0.0=5000\r\n" + "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9) Gecko/2008061015 Firefox/3.0\r\n" + "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + "Accept-Language: en-us,en;q=0.5\r\n" + "Accept-Encoding: gzip,deflate\r\n" + "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + "Keep-Alive: 300\r\n" + "Connection: keep-alive\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/favicon.ico" + ,.request_url= "/favicon.ico" + ,.num_headers= 8 + ,.headers= + { { "Host", "0.0.0.0=5000" } + , { "User-Agent", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9) Gecko/2008061015 Firefox/3.0" } + , { "Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" } + , { "Accept-Language", "en-us,en;q=0.5" } + , { "Accept-Encoding", "gzip,deflate" } + , { "Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7" } + , { "Keep-Alive", "300" } + , { "Connection", "keep-alive" } + } + ,.body= "" + } + +#define DUMBLUCK 2 +, {.name= "dumbluck" + ,.type= HTTP_REQUEST + ,.raw= "GET /dumbluck HTTP/1.1\r\n" + "aaaaaaaaaaaaa:++++++++++\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/dumbluck" + ,.request_url= "/dumbluck" + 
,.num_headers= 1 + ,.headers= + { { "aaaaaaaaaaaaa", "++++++++++" } + } + ,.body= "" + } + +#define FRAGMENT_IN_URI 3 +, {.name= "fragment in url" + ,.type= HTTP_REQUEST + ,.raw= "GET /forums/1/topics/2375?page=1#posts-17408 HTTP/1.1\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "page=1" + ,.fragment= "posts-17408" + ,.request_path= "/forums/1/topics/2375" + /* XXX request url does include fragment? */ + ,.request_url= "/forums/1/topics/2375?page=1#posts-17408" + ,.num_headers= 0 + ,.body= "" + } + +#define GET_NO_HEADERS_NO_BODY 4 +, {.name= "get no headers no body" + ,.type= HTTP_REQUEST + ,.raw= "GET /get_no_headers_no_body/world HTTP/1.1\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE /* would need Connection: close */ + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/get_no_headers_no_body/world" + ,.request_url= "/get_no_headers_no_body/world" + ,.num_headers= 0 + ,.body= "" + } + +#define GET_ONE_HEADER_NO_BODY 5 +, {.name= "get one header no body" + ,.type= HTTP_REQUEST + ,.raw= "GET /get_one_header_no_body HTTP/1.1\r\n" + "Accept: */*\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE /* would need Connection: close */ + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/get_one_header_no_body" + ,.request_url= "/get_one_header_no_body" + ,.num_headers= 1 + ,.headers= + { { "Accept" , "*/*" } + } + ,.body= "" + } + +#define GET_FUNKY_CONTENT_LENGTH 6 +, {.name= "get funky content length body hello" + ,.type= HTTP_REQUEST + ,.raw= "GET /get_funky_content_length_body_hello HTTP/1.0\r\n" + "conTENT-Length: 5\r\n" + "\r\n" + "HELLO" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/get_funky_content_length_body_hello" + ,.request_url= "/get_funky_content_length_body_hello" + ,.num_headers= 1 + ,.headers= + { { "conTENT-Length" , "5" } + } + ,.body= "HELLO" + } + +#define POST_IDENTITY_BODY_WORLD 7 +, {.name= "post identity body world" + ,.type= HTTP_REQUEST + ,.raw= "POST /post_identity_body_world?q=search#hey HTTP/1.1\r\n" + "Accept: */*\r\n" + "Transfer-Encoding: identity\r\n" + "Content-Length: 5\r\n" + "\r\n" + "World" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_POST + ,.query_string= "q=search" + ,.fragment= "hey" + ,.request_path= "/post_identity_body_world" + ,.request_url= "/post_identity_body_world?q=search#hey" + ,.num_headers= 3 + ,.headers= + { { "Accept", "*/*" } + , { "Transfer-Encoding", "identity" } + , { "Content-Length", "5" } + } + ,.body= "World" + } + +#define POST_CHUNKED_ALL_YOUR_BASE 8 +, {.name= "post - chunked body: all your base are belong to us" + ,.type= HTTP_REQUEST + ,.raw= "POST /post_chunked_all_your_base HTTP/1.1\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "1e\r\nall your base are belong to us\r\n" + "0\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_POST + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/post_chunked_all_your_base" + ,.request_url= "/post_chunked_all_your_base" + ,.num_headers= 1 + ,.headers= + { { "Transfer-Encoding" , "chunked" } + } + ,.body= "all your base 
are belong to us" + ,.num_chunks_complete= 2 + ,.chunk_lengths= { 0x1e } + } + +#define TWO_CHUNKS_MULT_ZERO_END 9 +, {.name= "two chunks ; triple zero ending" + ,.type= HTTP_REQUEST + ,.raw= "POST /two_chunks_mult_zero_end HTTP/1.1\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "5\r\nhello\r\n" + "6\r\n world\r\n" + "000\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_POST + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/two_chunks_mult_zero_end" + ,.request_url= "/two_chunks_mult_zero_end" + ,.num_headers= 1 + ,.headers= + { { "Transfer-Encoding", "chunked" } + } + ,.body= "hello world" + ,.num_chunks_complete= 3 + ,.chunk_lengths= { 5, 6 } + } + +#define CHUNKED_W_TRAILING_HEADERS 10 +, {.name= "chunked with trailing headers. blech." + ,.type= HTTP_REQUEST + ,.raw= "POST /chunked_w_trailing_headers HTTP/1.1\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "5\r\nhello\r\n" + "6\r\n world\r\n" + "0\r\n" + "Vary: *\r\n" + "Content-Type: text/plain\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_POST + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/chunked_w_trailing_headers" + ,.request_url= "/chunked_w_trailing_headers" + ,.num_headers= 3 + ,.headers= + { { "Transfer-Encoding", "chunked" } + , { "Vary", "*" } + , { "Content-Type", "text/plain" } + } + ,.body= "hello world" + ,.num_chunks_complete= 3 + ,.chunk_lengths= { 5, 6 } + } + +#define CHUNKED_W_NONSENSE_AFTER_LENGTH 11 +, {.name= "with nonsense after the length" + ,.type= HTTP_REQUEST + ,.raw= "POST /chunked_w_nonsense_after_length HTTP/1.1\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "5; ilovew3;whattheluck=aretheseparametersfor\r\nhello\r\n" + "6; blahblah; blah\r\n world\r\n" + "0\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_POST + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/chunked_w_nonsense_after_length" + ,.request_url= "/chunked_w_nonsense_after_length" + ,.num_headers= 1 + ,.headers= + { { "Transfer-Encoding", "chunked" } + } + ,.body= "hello world" + ,.num_chunks_complete= 3 + ,.chunk_lengths= { 5, 6 } + } + +#define WITH_QUOTES 12 +, {.name= "with quotes" + ,.type= HTTP_REQUEST + ,.raw= "GET /with_\"stupid\"_quotes?foo=\"bar\" HTTP/1.1\r\n\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "foo=\"bar\"" + ,.fragment= "" + ,.request_path= "/with_\"stupid\"_quotes" + ,.request_url= "/with_\"stupid\"_quotes?foo=\"bar\"" + ,.num_headers= 0 + ,.headers= { } + ,.body= "" + } + +#define APACHEBENCH_GET 13 +/* The server receiving this request SHOULD NOT wait for EOF + * to know that content-length == 0. + * How to represent this in a unit test? message_complete_on_eof + * Compare with NO_CONTENT_LENGTH_RESPONSE. 
+ */ +, {.name = "apachebench get" + ,.type= HTTP_REQUEST + ,.raw= "GET /test HTTP/1.0\r\n" + "Host: 0.0.0.0:5000\r\n" + "User-Agent: ApacheBench/2.3\r\n" + "Accept: */*\r\n\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/test" + ,.request_url= "/test" + ,.num_headers= 3 + ,.headers= { { "Host", "0.0.0.0:5000" } + , { "User-Agent", "ApacheBench/2.3" } + , { "Accept", "*/*" } + } + ,.body= "" + } + +#define QUERY_URL_WITH_QUESTION_MARK_GET 14 +/* Some clients include '?' characters in query strings. + */ +, {.name = "query url with question mark" + ,.type= HTTP_REQUEST + ,.raw= "GET /test.cgi?foo=bar?baz HTTP/1.1\r\n\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "foo=bar?baz" + ,.fragment= "" + ,.request_path= "/test.cgi" + ,.request_url= "/test.cgi?foo=bar?baz" + ,.num_headers= 0 + ,.headers= {} + ,.body= "" + } + +#define PREFIX_NEWLINE_GET 15 +/* Some clients, especially after a POST in a keep-alive connection, + * will send an extra CRLF before the next request + */ +, {.name = "newline prefix get" + ,.type= HTTP_REQUEST + ,.raw= "\r\nGET /test HTTP/1.1\r\n\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/test" + ,.request_url= "/test" + ,.num_headers= 0 + ,.headers= { } + ,.body= "" + } + +#define UPGRADE_REQUEST 16 +, {.name = "upgrade request" + ,.type= HTTP_REQUEST + ,.raw= "GET /demo HTTP/1.1\r\n" + "Host: example.com\r\n" + "Connection: Upgrade\r\n" + "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00\r\n" + "Sec-WebSocket-Protocol: sample\r\n" + "Upgrade: WebSocket\r\n" + "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5\r\n" + "Origin: http://example.com\r\n" + "\r\n" + "Hot diggity dogg" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/demo" + ,.request_url= "/demo" + ,.num_headers= 7 + ,.upgrade="Hot diggity dogg" + ,.headers= { { "Host", "example.com" } + , { "Connection", "Upgrade" } + , { "Sec-WebSocket-Key2", "12998 5 Y3 1 .P00" } + , { "Sec-WebSocket-Protocol", "sample" } + , { "Upgrade", "WebSocket" } + , { "Sec-WebSocket-Key1", "4 @1 46546xW%0l 1 5" } + , { "Origin", "http://example.com" } + } + ,.body= "" + } + +#define CONNECT_REQUEST 17 +, {.name = "connect request" + ,.type= HTTP_REQUEST + ,.raw= "CONNECT 0-home0.netscape.com:443 HTTP/1.0\r\n" + "User-agent: Mozilla/1.1N\r\n" + "Proxy-authorization: basic aGVsbG86d29ybGQ=\r\n" + "\r\n" + "some data\r\n" + "and yet even more data" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.method= HTTP_CONNECT + ,.query_string= "" + ,.fragment= "" + ,.request_path= "" + ,.request_url= "0-home0.netscape.com:443" + ,.num_headers= 2 + ,.upgrade="some data\r\nand yet even more data" + ,.headers= { { "User-agent", "Mozilla/1.1N" } + , { "Proxy-authorization", "basic aGVsbG86d29ybGQ=" } + } + ,.body= "" + } + +#define REPORT_REQ 18 +, {.name= "report request" + ,.type= HTTP_REQUEST + ,.raw= "REPORT /test HTTP/1.1\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_REPORT + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/test" + 
,.request_url= "/test" + ,.num_headers= 0 + ,.headers= {} + ,.body= "" + } + +#define NO_HTTP_VERSION 19 +, {.name= "request with no http version" + ,.type= HTTP_REQUEST + ,.raw= "GET /\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 0 + ,.http_minor= 9 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/" + ,.request_url= "/" + ,.num_headers= 0 + ,.headers= {} + ,.body= "" + } + +#define MSEARCH_REQ 20 +, {.name= "m-search request" + ,.type= HTTP_REQUEST + ,.raw= "M-SEARCH * HTTP/1.1\r\n" + "HOST: 239.255.255.250:1900\r\n" + "MAN: \"ssdp:discover\"\r\n" + "ST: \"ssdp:all\"\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_MSEARCH + ,.query_string= "" + ,.fragment= "" + ,.request_path= "*" + ,.request_url= "*" + ,.num_headers= 3 + ,.headers= { { "HOST", "239.255.255.250:1900" } + , { "MAN", "\"ssdp:discover\"" } + , { "ST", "\"ssdp:all\"" } + } + ,.body= "" + } + +#define LINE_FOLDING_IN_HEADER 21 +, {.name= "line folding in header value" + ,.type= HTTP_REQUEST + ,.raw= "GET / HTTP/1.1\r\n" + "Line1: abc\r\n" + "\tdef\r\n" + " ghi\r\n" + "\t\tjkl\r\n" + " mno \r\n" + "\t \tqrs\r\n" + "Line2: \t line2\t\r\n" + "Line3:\r\n" + " line3\r\n" + "Line4: \r\n" + " \r\n" + "Connection:\r\n" + " close\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/" + ,.request_url= "/" + ,.num_headers= 5 + ,.headers= { { "Line1", "abc\tdef ghi\t\tjkl mno \t \tqrs" } + , { "Line2", "line2\t" } + , { "Line3", "line3" } + , { "Line4", "" } + , { "Connection", "close" }, + } + ,.body= "" + } + + +#define QUERY_TERMINATED_HOST 22 +, {.name= "host terminated by a query string" + ,.type= HTTP_REQUEST + ,.raw= "GET http://hypnotoad.org?hail=all HTTP/1.1\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "hail=all" + ,.fragment= "" + ,.request_path= "" + ,.request_url= "http://hypnotoad.org?hail=all" + ,.host= "hypnotoad.org" + ,.num_headers= 0 + ,.headers= { } + ,.body= "" + } + +#define QUERY_TERMINATED_HOSTPORT 23 +, {.name= "host:port terminated by a query string" + ,.type= HTTP_REQUEST + ,.raw= "GET http://hypnotoad.org:1234?hail=all HTTP/1.1\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "hail=all" + ,.fragment= "" + ,.request_path= "" + ,.request_url= "http://hypnotoad.org:1234?hail=all" + ,.host= "hypnotoad.org" + ,.port= 1234 + ,.num_headers= 0 + ,.headers= { } + ,.body= "" + } + +#define SPACE_TERMINATED_HOSTPORT 24 +, {.name= "host:port terminated by a space" + ,.type= HTTP_REQUEST + ,.raw= "GET http://hypnotoad.org:1234 HTTP/1.1\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "" + ,.request_url= "http://hypnotoad.org:1234" + ,.host= "hypnotoad.org" + ,.port= 1234 + ,.num_headers= 0 + ,.headers= { } + ,.body= "" + } + +#define PATCH_REQ 25 +, {.name = "PATCH request" + ,.type= HTTP_REQUEST + ,.raw= "PATCH /file.txt HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Content-Type: application/example\r\n" + "If-Match: \"e0023aa4e\"\r\n" + "Content-Length: 10\r\n" + "\r\n" + 
"cccccccccc" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_PATCH + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/file.txt" + ,.request_url= "/file.txt" + ,.num_headers= 4 + ,.headers= { { "Host", "www.example.com" } + , { "Content-Type", "application/example" } + , { "If-Match", "\"e0023aa4e\"" } + , { "Content-Length", "10" } + } + ,.body= "cccccccccc" + } + +#define CONNECT_CAPS_REQUEST 26 +, {.name = "connect caps request" + ,.type= HTTP_REQUEST + ,.raw= "CONNECT HOME0.NETSCAPE.COM:443 HTTP/1.0\r\n" + "User-agent: Mozilla/1.1N\r\n" + "Proxy-authorization: basic aGVsbG86d29ybGQ=\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.method= HTTP_CONNECT + ,.query_string= "" + ,.fragment= "" + ,.request_path= "" + ,.request_url= "HOME0.NETSCAPE.COM:443" + ,.num_headers= 2 + ,.upgrade="" + ,.headers= { { "User-agent", "Mozilla/1.1N" } + , { "Proxy-authorization", "basic aGVsbG86d29ybGQ=" } + } + ,.body= "" + } + +#if !HTTP_PARSER_STRICT +#define UTF8_PATH_REQ 27 +, {.name= "utf-8 path request" + ,.type= HTTP_REQUEST + ,.raw= "GET /δ¶/δt/pope?q=1#narf HTTP/1.1\r\n" + "Host: github.com\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "q=1" + ,.fragment= "narf" + ,.request_path= "/δ¶/δt/pope" + ,.request_url= "/δ¶/δt/pope?q=1#narf" + ,.num_headers= 1 + ,.headers= { {"Host", "github.com" } + } + ,.body= "" + } + +#define HOSTNAME_UNDERSCORE 28 +, {.name = "hostname underscore" + ,.type= HTTP_REQUEST + ,.raw= "CONNECT home_0.netscape.com:443 HTTP/1.0\r\n" + "User-agent: Mozilla/1.1N\r\n" + "Proxy-authorization: basic aGVsbG86d29ybGQ=\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.method= HTTP_CONNECT + ,.query_string= "" + ,.fragment= "" + ,.request_path= "" + ,.request_url= "home_0.netscape.com:443" + ,.num_headers= 2 + ,.upgrade="" + ,.headers= { { "User-agent", "Mozilla/1.1N" } + , { "Proxy-authorization", "basic aGVsbG86d29ybGQ=" } + } + ,.body= "" + } +#endif /* !HTTP_PARSER_STRICT */ + +/* see https://github.com/ry/http-parser/issues/47 */ +#define EAT_TRAILING_CRLF_NO_CONNECTION_CLOSE 29 +, {.name = "eat CRLF between requests, no \"Connection: close\" header" + ,.raw= "POST / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Content-Type: application/x-www-form-urlencoded\r\n" + "Content-Length: 4\r\n" + "\r\n" + "q=42\r\n" /* note the trailing CRLF */ + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_POST + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/" + ,.request_url= "/" + ,.num_headers= 3 + ,.upgrade= 0 + ,.headers= { { "Host", "www.example.com" } + , { "Content-Type", "application/x-www-form-urlencoded" } + , { "Content-Length", "4" } + } + ,.body= "q=42" + } + +/* see https://github.com/ry/http-parser/issues/47 */ +#define EAT_TRAILING_CRLF_WITH_CONNECTION_CLOSE 30 +, {.name = "eat CRLF between requests even if \"Connection: close\" is set" + ,.raw= "POST / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Content-Type: application/x-www-form-urlencoded\r\n" + "Content-Length: 4\r\n" + "Connection: close\r\n" + "\r\n" + "q=42\r\n" /* note the trailing CRLF */ + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE /* input buffer isn't empty when on_message_complete is called */ + 
,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_POST + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/" + ,.request_url= "/" + ,.num_headers= 4 + ,.upgrade= 0 + ,.headers= { { "Host", "www.example.com" } + , { "Content-Type", "application/x-www-form-urlencoded" } + , { "Content-Length", "4" } + , { "Connection", "close" } + } + ,.body= "q=42" + } + +#define PURGE_REQ 31 +, {.name = "PURGE request" + ,.type= HTTP_REQUEST + ,.raw= "PURGE /file.txt HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_PURGE + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/file.txt" + ,.request_url= "/file.txt" + ,.num_headers= 1 + ,.headers= { { "Host", "www.example.com" } } + ,.body= "" + } + +#define SEARCH_REQ 32 +, {.name = "SEARCH request" + ,.type= HTTP_REQUEST + ,.raw= "SEARCH / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_SEARCH + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/" + ,.request_url= "/" + ,.num_headers= 1 + ,.headers= { { "Host", "www.example.com" } } + ,.body= "" + } + +#define PROXY_WITH_BASIC_AUTH 33 +, {.name= "host:port and basic_auth" + ,.type= HTTP_REQUEST + ,.raw= "GET http://a%12:b!&*$@hypnotoad.org:1234/toto HTTP/1.1\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.fragment= "" + ,.request_path= "/toto" + ,.request_url= "http://a%12:b!&*$@hypnotoad.org:1234/toto" + ,.host= "hypnotoad.org" + ,.userinfo= "a%12:b!&*$" + ,.port= 1234 + ,.num_headers= 0 + ,.headers= { } + ,.body= "" + } + +#define LINE_FOLDING_IN_HEADER_WITH_LF 34 +, {.name= "line folding in header value" + ,.type= HTTP_REQUEST + ,.raw= "GET / HTTP/1.1\n" + "Line1: abc\n" + "\tdef\n" + " ghi\n" + "\t\tjkl\n" + " mno \n" + "\t \tqrs\n" + "Line2: \t line2\t\n" + "Line3:\n" + " line3\n" + "Line4: \n" + " \n" + "Connection:\n" + " close\n" + "\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/" + ,.request_url= "/" + ,.num_headers= 5 + ,.headers= { { "Line1", "abc\tdef ghi\t\tjkl mno \t \tqrs" } + , { "Line2", "line2\t" } + , { "Line3", "line3" } + , { "Line4", "" } + , { "Connection", "close" }, + } + ,.body= "" + } + +#define CONNECTION_MULTI 35 +, {.name = "multiple connection header values with folding" + ,.type= HTTP_REQUEST + ,.raw= "GET /demo HTTP/1.1\r\n" + "Host: example.com\r\n" + "Connection: Something,\r\n" + " Upgrade, ,Keep-Alive\r\n" + "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00\r\n" + "Sec-WebSocket-Protocol: sample\r\n" + "Upgrade: WebSocket\r\n" + "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5\r\n" + "Origin: http://example.com\r\n" + "\r\n" + "Hot diggity dogg" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/demo" + ,.request_url= "/demo" + ,.num_headers= 7 + ,.upgrade="Hot diggity dogg" + ,.headers= { { "Host", "example.com" } + , { "Connection", "Something, Upgrade, ,Keep-Alive" } + , { "Sec-WebSocket-Key2", "12998 5 Y3 1 .P00" } + , { "Sec-WebSocket-Protocol", "sample" } + , { "Upgrade", "WebSocket" } + , { "Sec-WebSocket-Key1", "4 @1 46546xW%0l 1 5" } + , { "Origin", "http://example.com" } + } + 
,.body= "" + } + +#define CONNECTION_MULTI_LWS 36 +, {.name = "multiple connection header values with folding and lws" + ,.type= HTTP_REQUEST + ,.raw= "GET /demo HTTP/1.1\r\n" + "Connection: keep-alive, upgrade\r\n" + "Upgrade: WebSocket\r\n" + "\r\n" + "Hot diggity dogg" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/demo" + ,.request_url= "/demo" + ,.num_headers= 2 + ,.upgrade="Hot diggity dogg" + ,.headers= { { "Connection", "keep-alive, upgrade" } + , { "Upgrade", "WebSocket" } + } + ,.body= "" + } + +#define CONNECTION_MULTI_LWS_CRLF 37 +, {.name = "multiple connection header values with folding and lws" + ,.type= HTTP_REQUEST + ,.raw= "GET /demo HTTP/1.1\r\n" + "Connection: keep-alive, \r\n upgrade\r\n" + "Upgrade: WebSocket\r\n" + "\r\n" + "Hot diggity dogg" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_GET + ,.query_string= "" + ,.fragment= "" + ,.request_path= "/demo" + ,.request_url= "/demo" + ,.num_headers= 2 + ,.upgrade="Hot diggity dogg" + ,.headers= { { "Connection", "keep-alive, upgrade" } + , { "Upgrade", "WebSocket" } + } + ,.body= "" + } + +#define UPGRADE_POST_REQUEST 38 +, {.name = "upgrade post request" + ,.type= HTTP_REQUEST + ,.raw= "POST /demo HTTP/1.1\r\n" + "Host: example.com\r\n" + "Connection: Upgrade\r\n" + "Upgrade: HTTP/2.0\r\n" + "Content-Length: 15\r\n" + "\r\n" + "sweet post body" + "Hot diggity dogg" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_POST + ,.request_path= "/demo" + ,.request_url= "/demo" + ,.num_headers= 4 + ,.upgrade="Hot diggity dogg" + ,.headers= { { "Host", "example.com" } + , { "Connection", "Upgrade" } + , { "Upgrade", "HTTP/2.0" } + , { "Content-Length", "15" } + } + ,.body= "sweet post body" + } + +#define CONNECT_WITH_BODY_REQUEST 39 +, {.name = "connect with body request" + ,.type= HTTP_REQUEST + ,.raw= "CONNECT foo.bar.com:443 HTTP/1.0\r\n" + "User-agent: Mozilla/1.1N\r\n" + "Proxy-authorization: basic aGVsbG86d29ybGQ=\r\n" + "Content-Length: 10\r\n" + "\r\n" + "blarfcicle" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.method= HTTP_CONNECT + ,.request_url= "foo.bar.com:443" + ,.num_headers= 3 + ,.upgrade="blarfcicle" + ,.headers= { { "User-agent", "Mozilla/1.1N" } + , { "Proxy-authorization", "basic aGVsbG86d29ybGQ=" } + , { "Content-Length", "10" } + } + ,.body= "" + } + +/* Examples from the Internet draft for LINK/UNLINK methods: + * https://tools.ietf.org/id/draft-snell-link-method-01.html#rfc.section.5 + */ + +#define LINK_REQUEST 40 +, {.name = "link request" + ,.type= HTTP_REQUEST + ,.raw= "LINK /images/my_dog.jpg HTTP/1.1\r\n" + "Host: example.com\r\n" + "Link: ; rel=\"tag\"\r\n" + "Link: ; rel=\"tag\"\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_LINK + ,.request_path= "/images/my_dog.jpg" + ,.request_url= "/images/my_dog.jpg" + ,.query_string= "" + ,.fragment= "" + ,.num_headers= 3 + ,.headers= { { "Host", "example.com" } + , { "Link", "; rel=\"tag\"" } + , { "Link", "; rel=\"tag\"" } + } + ,.body= "" + } + +#define UNLINK_REQUEST 41 +, {.name = "unlink request" + ,.type= HTTP_REQUEST + ,.raw= "UNLINK /images/my_dog.jpg HTTP/1.1\r\n" + "Host: example.com\r\n" + "Link: ; rel=\"tag\"\r\n" + "\r\n" + 
,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_UNLINK + ,.request_path= "/images/my_dog.jpg" + ,.request_url= "/images/my_dog.jpg" + ,.query_string= "" + ,.fragment= "" + ,.num_headers= 2 + ,.headers= { { "Host", "example.com" } + , { "Link", "; rel=\"tag\"" } + } + ,.body= "" + } + +#define SOURCE_REQUEST 42 +, {.name = "source request" + ,.type= HTTP_REQUEST + ,.raw= "SOURCE /music/sweet/music HTTP/1.1\r\n" + "Host: example.com\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.method= HTTP_SOURCE + ,.request_path= "/music/sweet/music" + ,.request_url= "/music/sweet/music" + ,.query_string= "" + ,.fragment= "" + ,.num_headers= 1 + ,.headers= { { "Host", "example.com" } } + ,.body= "" + } + +#define SOURCE_ICE_REQUEST 42 +, {.name = "source request" + ,.type= HTTP_REQUEST + ,.raw= "SOURCE /music/sweet/music ICE/1.0\r\n" + "Host: example.com\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.method= HTTP_SOURCE + ,.request_path= "/music/sweet/music" + ,.request_url= "/music/sweet/music" + ,.query_string= "" + ,.fragment= "" + ,.num_headers= 1 + ,.headers= { { "Host", "example.com" } } + ,.body= "" + } +}; + +/* * R E S P O N S E S * */ +const struct message responses[] = +#define GOOGLE_301 0 +{ {.name= "google 301" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 301 Moved Permanently\r\n" + "Location: http://www.google.com/\r\n" + "Content-Type: text/html; charset=UTF-8\r\n" + "Date: Sun, 26 Apr 2009 11:11:49 GMT\r\n" + "Expires: Tue, 26 May 2009 11:11:49 GMT\r\n" + "X-$PrototypeBI-Version: 1.6.0.3\r\n" /* $ char in header field */ + "Cache-Control: public, max-age=2592000\r\n" + "Server: gws\r\n" + "Content-Length: 219 \r\n" + "\r\n" + "\n" + "301 Moved\n" + "
301 Moved
\n" + "The document has moved\n" + "here.\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 301 + ,.response_status= "Moved Permanently" + ,.num_headers= 8 + ,.headers= + { { "Location", "http://www.google.com/" } + , { "Content-Type", "text/html; charset=UTF-8" } + , { "Date", "Sun, 26 Apr 2009 11:11:49 GMT" } + , { "Expires", "Tue, 26 May 2009 11:11:49 GMT" } + , { "X-$PrototypeBI-Version", "1.6.0.3" } + , { "Cache-Control", "public, max-age=2592000" } + , { "Server", "gws" } + , { "Content-Length", "219 " } + } + ,.body= "\n" + "301 Moved\n" + "
301 Moved
\n" + "The document has moved\n" + "here.\r\n" + "\r\n" + } + +#define NO_CONTENT_LENGTH_RESPONSE 1 +/* The client should wait for the server's EOF. That is, when content-length + * is not specified, and "Connection: close", the end of body is specified + * by the EOF. + * Compare with APACHEBENCH_GET + */ +, {.name= "no content-length response" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Date: Tue, 04 Aug 2009 07:59:32 GMT\r\n" + "Server: Apache\r\n" + "X-Powered-By: Servlet/2.5 JSP/2.1\r\n" + "Content-Type: text/xml; charset=utf-8\r\n" + "Connection: close\r\n" + "\r\n" + "\n" + "\n" + " \n" + " \n" + " SOAP-ENV:Client\n" + " Client Error\n" + " \n" + " \n" + "" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 5 + ,.headers= + { { "Date", "Tue, 04 Aug 2009 07:59:32 GMT" } + , { "Server", "Apache" } + , { "X-Powered-By", "Servlet/2.5 JSP/2.1" } + , { "Content-Type", "text/xml; charset=utf-8" } + , { "Connection", "close" } + } + ,.body= "\n" + "\n" + " \n" + " \n" + " SOAP-ENV:Client\n" + " Client Error\n" + " \n" + " \n" + "" + } + +#define NO_HEADERS_NO_BODY_404 2 +, {.name= "404 no headers no body" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 404 Not Found\r\n\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 404 + ,.response_status= "Not Found" + ,.num_headers= 0 + ,.headers= {} + ,.body_size= 0 + ,.body= "" + } + +#define NO_REASON_PHRASE 3 +, {.name= "301 no response phrase" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 301\r\n\r\n" + ,.should_keep_alive = FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 301 + ,.response_status= "" + ,.num_headers= 0 + ,.headers= {} + ,.body= "" + } + +#define TRAILING_SPACE_ON_CHUNKED_BODY 4 +, {.name="200 trailing space on chunked body" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "25 \r\n" + "This is the data in the first chunk\r\n" + "\r\n" + "1C\r\n" + "and this is the second one\r\n" + "\r\n" + "0 \r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 2 + ,.headers= + { {"Content-Type", "text/plain" } + , {"Transfer-Encoding", "chunked" } + } + ,.body_size = 37+28 + ,.body = + "This is the data in the first chunk\r\n" + "and this is the second one\r\n" + ,.num_chunks_complete= 3 + ,.chunk_lengths= { 0x25, 0x1c } + } + +#define NO_CARRIAGE_RET 5 +, {.name="no carriage ret" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\n" + "Content-Type: text/html; charset=utf-8\n" + "Connection: close\n" + "\n" + "these headers are from http://news.ycombinator.com/" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 2 + ,.headers= + { {"Content-Type", "text/html; charset=utf-8" } + , {"Connection", "close" } + } + ,.body= "these headers are from http://news.ycombinator.com/" + } + +#define PROXY_CONNECTION 6 +, {.name="proxy connection" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Content-Type: text/html; charset=UTF-8\r\n" + "Content-Length: 11\r\n" + "Proxy-Connection: close\r\n" + "Date: Thu, 31 Dec 2009 20:55:48 +0000\r\n" + "\r\n" + "hello world" + ,.should_keep_alive= FALSE + 
,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 4 + ,.headers= + { {"Content-Type", "text/html; charset=UTF-8" } + , {"Content-Length", "11" } + , {"Proxy-Connection", "close" } + , {"Date", "Thu, 31 Dec 2009 20:55:48 +0000"} + } + ,.body= "hello world" + } + +#define UNDERSTORE_HEADER_KEY 7 + // shown by + // curl -o /dev/null -v "http://ad.doubleclick.net/pfadx/DARTSHELLCONFIGXML;dcmt=text/xml;" +, {.name="underscore header key" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Server: DCLK-AdSvr\r\n" + "Content-Type: text/xml\r\n" + "Content-Length: 0\r\n" + "DCLK_imp: v7;x;114750856;0-0;0;17820020;0/0;21603567/21621457/1;;~okv=;dcmt=text/xml;;~cs=o\r\n\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 4 + ,.headers= + { {"Server", "DCLK-AdSvr" } + , {"Content-Type", "text/xml" } + , {"Content-Length", "0" } + , {"DCLK_imp", "v7;x;114750856;0-0;0;17820020;0/0;21603567/21621457/1;;~okv=;dcmt=text/xml;;~cs=o" } + } + ,.body= "" + } + +#define BONJOUR_MADAME_FR 8 +/* The client should not merge two headers fields when the first one doesn't + * have a value. + */ +, {.name= "bonjourmadame.fr" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.0 301 Moved Permanently\r\n" + "Date: Thu, 03 Jun 2010 09:56:32 GMT\r\n" + "Server: Apache/2.2.3 (Red Hat)\r\n" + "Cache-Control: public\r\n" + "Pragma: \r\n" + "Location: http://www.bonjourmadame.fr/\r\n" + "Vary: Accept-Encoding\r\n" + "Content-Length: 0\r\n" + "Content-Type: text/html; charset=UTF-8\r\n" + "Connection: keep-alive\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.status_code= 301 + ,.response_status= "Moved Permanently" + ,.num_headers= 9 + ,.headers= + { { "Date", "Thu, 03 Jun 2010 09:56:32 GMT" } + , { "Server", "Apache/2.2.3 (Red Hat)" } + , { "Cache-Control", "public" } + , { "Pragma", "" } + , { "Location", "http://www.bonjourmadame.fr/" } + , { "Vary", "Accept-Encoding" } + , { "Content-Length", "0" } + , { "Content-Type", "text/html; charset=UTF-8" } + , { "Connection", "keep-alive" } + } + ,.body= "" + } + +#define RES_FIELD_UNDERSCORE 9 +/* Should handle spaces in header fields */ +, {.name= "field underscore" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Date: Tue, 28 Sep 2010 01:14:13 GMT\r\n" + "Server: Apache\r\n" + "Cache-Control: no-cache, must-revalidate\r\n" + "Expires: Mon, 26 Jul 1997 05:00:00 GMT\r\n" + ".et-Cookie: PlaxoCS=1274804622353690521; path=/; domain=.plaxo.com\r\n" + "Vary: Accept-Encoding\r\n" + "_eep-Alive: timeout=45\r\n" /* semantic value ignored */ + "_onnection: Keep-Alive\r\n" /* semantic value ignored */ + "Transfer-Encoding: chunked\r\n" + "Content-Type: text/html\r\n" + "Connection: close\r\n" + "\r\n" + "0\r\n\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 11 + ,.headers= + { { "Date", "Tue, 28 Sep 2010 01:14:13 GMT" } + , { "Server", "Apache" } + , { "Cache-Control", "no-cache, must-revalidate" } + , { "Expires", "Mon, 26 Jul 1997 05:00:00 GMT" } + , { ".et-Cookie", "PlaxoCS=1274804622353690521; path=/; domain=.plaxo.com" } + , { "Vary", "Accept-Encoding" } + , { "_eep-Alive", "timeout=45" } + , { "_onnection", "Keep-Alive" } + , { "Transfer-Encoding", "chunked" } + , { "Content-Type", "text/html" 
} + , { "Connection", "close" } + } + ,.body= "" + ,.num_chunks_complete= 1 + ,.chunk_lengths= {} + } + +#define NON_ASCII_IN_STATUS_LINE 10 +/* Should handle non-ASCII in status line */ +, {.name= "non-ASCII in status line" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 500 Oriëntatieprobleem\r\n" + "Date: Fri, 5 Nov 2010 23:07:12 GMT+2\r\n" + "Content-Length: 0\r\n" + "Connection: close\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 500 + ,.response_status= "Oriëntatieprobleem" + ,.num_headers= 3 + ,.headers= + { { "Date", "Fri, 5 Nov 2010 23:07:12 GMT+2" } + , { "Content-Length", "0" } + , { "Connection", "close" } + } + ,.body= "" + } + +#define HTTP_VERSION_0_9 11 +/* Should handle HTTP/0.9 */ +, {.name= "http version 0.9" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/0.9 200 OK\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 0 + ,.http_minor= 9 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 0 + ,.headers= + {} + ,.body= "" + } + +#define NO_CONTENT_LENGTH_NO_TRANSFER_ENCODING_RESPONSE 12 +/* The client should wait for the server's EOF. That is, when neither + * content-length nor transfer-encoding is specified, the end of body + * is specified by the EOF. + */ +, {.name= "neither content-length nor transfer-encoding response" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "\r\n" + "hello world" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 1 + ,.headers= + { { "Content-Type", "text/plain" } + } + ,.body= "hello world" + } + +#define NO_BODY_HTTP10_KA_200 13 +, {.name= "HTTP/1.0 with keep-alive and EOF-terminated 200 status" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.0 200 OK\r\n" + "Connection: keep-alive\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 1 + ,.http_minor= 0 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 1 + ,.headers= + { { "Connection", "keep-alive" } + } + ,.body_size= 0 + ,.body= "" + } + +#define NO_BODY_HTTP10_KA_204 14 +, {.name= "HTTP/1.0 with keep-alive and a 204 status" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.0 204 No content\r\n" + "Connection: keep-alive\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.status_code= 204 + ,.response_status= "No content" + ,.num_headers= 1 + ,.headers= + { { "Connection", "keep-alive" } + } + ,.body_size= 0 + ,.body= "" + } + +#define NO_BODY_HTTP11_KA_200 15 +, {.name= "HTTP/1.1 with an EOF-terminated 200 status" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 0 + ,.headers={} + ,.body_size= 0 + ,.body= "" + } + +#define NO_BODY_HTTP11_KA_204 16 +, {.name= "HTTP/1.1 with a 204 status" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 204 No content\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 204 + ,.response_status= "No content" + ,.num_headers= 0 + ,.headers={} + ,.body_size= 0 + ,.body= "" + } + +#define NO_BODY_HTTP11_NOKA_204 17 +, {.name= "HTTP/1.1 with a 204 status and keep-alive disabled" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 204 No content\r\n" 
+ "Connection: close\r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 204 + ,.response_status= "No content" + ,.num_headers= 1 + ,.headers= + { { "Connection", "close" } + } + ,.body_size= 0 + ,.body= "" + } + +#define NO_BODY_HTTP11_KA_CHUNKED_200 18 +, {.name= "HTTP/1.1 with chunked endocing and a 200 response" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "0\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 1 + ,.headers= + { { "Transfer-Encoding", "chunked" } + } + ,.body_size= 0 + ,.body= "" + ,.num_chunks_complete= 1 + } + +#if !HTTP_PARSER_STRICT +#define SPACE_IN_FIELD_RES 19 +/* Should handle spaces in header fields */ +, {.name= "field space" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Server: Microsoft-IIS/6.0\r\n" + "X-Powered-By: ASP.NET\r\n" + "en-US Content-Type: text/xml\r\n" /* this is the problem */ + "Content-Type: text/xml\r\n" + "Content-Length: 16\r\n" + "Date: Fri, 23 Jul 2010 18:45:38 GMT\r\n" + "Connection: keep-alive\r\n" + "\r\n" + "hello" /* fake body */ + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 7 + ,.headers= + { { "Server", "Microsoft-IIS/6.0" } + , { "X-Powered-By", "ASP.NET" } + , { "en-US Content-Type", "text/xml" } + , { "Content-Type", "text/xml" } + , { "Content-Length", "16" } + , { "Date", "Fri, 23 Jul 2010 18:45:38 GMT" } + , { "Connection", "keep-alive" } + } + ,.body= "hello" + } +#endif /* !HTTP_PARSER_STRICT */ + +#define AMAZON_COM 20 +, {.name= "amazon.com" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 301 MovedPermanently\r\n" + "Date: Wed, 15 May 2013 17:06:33 GMT\r\n" + "Server: Server\r\n" + "x-amz-id-1: 0GPHKXSJQ826RK7GZEB2\r\n" + "p3p: policyref=\"http://www.amazon.com/w3c/p3p.xml\",CP=\"CAO DSP LAW CUR ADM IVAo IVDo CONo OTPo OUR DELi PUBi OTRi BUS PHY ONL UNI PUR FIN COM NAV INT DEM CNT STA HEA PRE LOC GOV OTC \"\r\n" + "x-amz-id-2: STN69VZxIFSz9YJLbz1GDbxpbjG6Qjmmq5E3DxRhOUw+Et0p4hr7c/Q8qNcx4oAD\r\n" + "Location: http://www.amazon.com/Dan-Brown/e/B000AP9DSU/ref=s9_pop_gw_al1?_encoding=UTF8&refinementId=618073011&pf_rd_m=ATVPDKIKX0DER&pf_rd_s=center-2&pf_rd_r=0SHYY5BZXN3KR20BNFAY&pf_rd_t=101&pf_rd_p=1263340922&pf_rd_i=507846\r\n" + "Vary: Accept-Encoding,User-Agent\r\n" + "Content-Type: text/html; charset=ISO-8859-1\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "1\r\n" + "\n\r\n" + "0\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 301 + ,.response_status= "MovedPermanently" + ,.num_headers= 9 + ,.headers= { { "Date", "Wed, 15 May 2013 17:06:33 GMT" } + , { "Server", "Server" } + , { "x-amz-id-1", "0GPHKXSJQ826RK7GZEB2" } + , { "p3p", "policyref=\"http://www.amazon.com/w3c/p3p.xml\",CP=\"CAO DSP LAW CUR ADM IVAo IVDo CONo OTPo OUR DELi PUBi OTRi BUS PHY ONL UNI PUR FIN COM NAV INT DEM CNT STA HEA PRE LOC GOV OTC \"" } + , { "x-amz-id-2", "STN69VZxIFSz9YJLbz1GDbxpbjG6Qjmmq5E3DxRhOUw+Et0p4hr7c/Q8qNcx4oAD" } + , { "Location", "http://www.amazon.com/Dan-Brown/e/B000AP9DSU/ref=s9_pop_gw_al1?_encoding=UTF8&refinementId=618073011&pf_rd_m=ATVPDKIKX0DER&pf_rd_s=center-2&pf_rd_r=0SHYY5BZXN3KR20BNFAY&pf_rd_t=101&pf_rd_p=1263340922&pf_rd_i=507846" } + , { "Vary", 
"Accept-Encoding,User-Agent" } + , { "Content-Type", "text/html; charset=ISO-8859-1" } + , { "Transfer-Encoding", "chunked" } + } + ,.body= "\n" + ,.num_chunks_complete= 2 + ,.chunk_lengths= { 1 } + } + +#define EMPTY_REASON_PHRASE_AFTER_SPACE 20 +, {.name= "empty reason phrase after space" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 \r\n" + "\r\n" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "" + ,.num_headers= 0 + ,.headers= {} + ,.body= "" + } + +#define CONTENT_LENGTH_X 21 +, {.name= "Content-Length-X" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Content-Length-X: 0\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "2\r\n" + "OK\r\n" + "0\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 2 + ,.headers= { { "Content-Length-X", "0" } + , { "Transfer-Encoding", "chunked" } + } + ,.body= "OK" + ,.num_chunks_complete= 2 + ,.chunk_lengths= { 2 } + } + +#define HTTP_101_RESPONSE_WITH_UPGRADE_HEADER 22 +, {.name= "HTTP 101 response with Upgrade header" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 101 Switching Protocols\r\n" + "Connection: upgrade\r\n" + "Upgrade: h2c\r\n" + "\r\n" + "proto" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 101 + ,.response_status= "Switching Protocols" + ,.upgrade= "proto" + ,.num_headers= 2 + ,.headers= + { { "Connection", "upgrade" } + , { "Upgrade", "h2c" } + } + } + +#define HTTP_101_RESPONSE_WITH_UPGRADE_HEADER_AND_CONTENT_LENGTH 23 +, {.name= "HTTP 101 response with Upgrade and Content-Length header" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 101 Switching Protocols\r\n" + "Connection: upgrade\r\n" + "Upgrade: h2c\r\n" + "Content-Length: 4\r\n" + "\r\n" + "body" + "proto" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 101 + ,.response_status= "Switching Protocols" + ,.body= "body" + ,.upgrade= "proto" + ,.num_headers= 3 + ,.headers= + { { "Connection", "upgrade" } + , { "Upgrade", "h2c" } + , { "Content-Length", "4" } + } + } + +#define HTTP_101_RESPONSE_WITH_UPGRADE_HEADER_AND_TRANSFER_ENCODING 24 +, {.name= "HTTP 101 response with Upgrade and Transfer-Encoding header" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 101 Switching Protocols\r\n" + "Connection: upgrade\r\n" + "Upgrade: h2c\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "2\r\n" + "bo\r\n" + "2\r\n" + "dy\r\n" + "0\r\n" + "\r\n" + "proto" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 101 + ,.response_status= "Switching Protocols" + ,.body= "body" + ,.upgrade= "proto" + ,.num_headers= 3 + ,.headers= + { { "Connection", "upgrade" } + , { "Upgrade", "h2c" } + , { "Transfer-Encoding", "chunked" } + } + ,.num_chunks_complete= 3 + ,.chunk_lengths= { 2, 2 } + } + +#define HTTP_200_RESPONSE_WITH_UPGRADE_HEADER 25 +, {.name= "HTTP 200 response with Upgrade header" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Connection: upgrade\r\n" + "Upgrade: h2c\r\n" + "\r\n" + "body" + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= TRUE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.body= "body" + ,.upgrade= NULL + ,.num_headers= 2 + ,.headers= + { { "Connection", "upgrade" } + , { "Upgrade", "h2c" } + } + } + 
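+/* --- Editorial sketch, not part of the vendored http-parser sources or of
+ * this patch: the Connection/Upgrade fixtures in this array assert on the
+ * parser's `upgrade` flag and record the expected leftover bytes in their
+ * `.upgrade` members.  A minimal driver for that pattern, using only the API
+ * declared in http_parser.h earlier in this diff (http_parser_init,
+ * http_parser_settings_init, http_parser_execute, HTTP_PARSER_ERRNO,
+ * http_errno_description and the `upgrade` bit), might look like the function
+ * below; `demo_headers_complete` and `demo_parse_request` are illustrative
+ * names, not symbols from this file. */
+static int demo_headers_complete(http_parser *p) { (void)p; return 0; }
+
+static void demo_parse_request(const char *buf, size_t len)
+{
+  http_parser p;
+  http_parser_settings s;
+
+  http_parser_settings_init(&s);            /* zero every callback pointer */
+  s.on_headers_complete = demo_headers_complete;
+  http_parser_init(&p, HTTP_REQUEST);
+
+  size_t nparsed = http_parser_execute(&p, &s, buf, len);
+
+  if (HTTP_PARSER_ERRNO(&p) != HPE_OK) {
+    /* A callback returned nonzero or the input was malformed. */
+    fprintf(stderr, "parse error: %s\n",
+            http_errno_description(HTTP_PARSER_ERRNO(&p)));
+  } else if (p.upgrade) {
+    /* Bytes past `nparsed` belong to the upgraded protocol, which is what
+     * the upgrade fixtures here compare against their `.upgrade` strings. */
+    fprintf(stderr, "upgrade after %zu HTTP bytes\n", nparsed);
+  } else if (nparsed != len) {
+    fprintf(stderr, "parser stopped early at byte %zu\n", nparsed);
+  }
+}
+/* --- end editorial sketch --- */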
+#define HTTP_200_RESPONSE_WITH_UPGRADE_HEADER_AND_CONTENT_LENGTH 26 +, {.name= "HTTP 200 response with Upgrade and Content-Length header" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Connection: upgrade\r\n" + "Upgrade: h2c\r\n" + "Content-Length: 4\r\n" + "\r\n" + "body" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 3 + ,.body= "body" + ,.upgrade= NULL + ,.headers= + { { "Connection", "upgrade" } + , { "Upgrade", "h2c" } + , { "Content-Length", "4" } + } + } + +#define HTTP_200_RESPONSE_WITH_UPGRADE_HEADER_AND_TRANSFER_ENCODING 27 +, {.name= "HTTP 200 response with Upgrade and Transfer-Encoding header" + ,.type= HTTP_RESPONSE + ,.raw= "HTTP/1.1 200 OK\r\n" + "Connection: upgrade\r\n" + "Upgrade: h2c\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n" + "2\r\n" + "bo\r\n" + "2\r\n" + "dy\r\n" + "0\r\n" + "\r\n" + ,.should_keep_alive= TRUE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 1 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 3 + ,.body= "body" + ,.upgrade= NULL + ,.headers= + { { "Connection", "upgrade" } + , { "Upgrade", "h2c" } + , { "Transfer-Encoding", "chunked" } + } + ,.num_chunks_complete= 3 + ,.chunk_lengths= { 2, 2 } + } +}; + +/* strnlen() is a POSIX.2008 addition. Can't rely on it being available so + * define it ourselves. + */ +size_t +strnlen(const char *s, size_t maxlen) +{ + const char *p; + + p = memchr(s, '\0', maxlen); + if (p == NULL) + return maxlen; + + return p - s; +} + +size_t +strlncat(char *dst, size_t len, const char *src, size_t n) +{ + size_t slen; + size_t dlen; + size_t rlen; + size_t ncpy; + + slen = strnlen(src, n); + dlen = strnlen(dst, len); + + if (dlen < len) { + rlen = len - dlen; + ncpy = slen < rlen ? slen : (rlen - 1); + memcpy(dst + dlen, src, ncpy); + dst[dlen + ncpy] = '\0'; + } + + assert(len > slen + dlen); + return slen + dlen; +} + +size_t +strlncpy(char *dst, size_t len, const char *src, size_t n) +{ + size_t slen; + size_t ncpy; + + slen = strnlen(src, n); + + if (len > 0) { + ncpy = slen < len ? slen : (len - 1); + memcpy(dst, src, ncpy); + dst[ncpy] = '\0'; + } + + assert(len > slen); + return slen; +} + +int +request_url_cb (http_parser *p, const char *buf, size_t len) +{ + assert(p == &parser); + strlncat(messages[num_messages].request_url, + sizeof(messages[num_messages].request_url), + buf, + len); + return 0; +} + +int +header_field_cb (http_parser *p, const char *buf, size_t len) +{ + assert(p == &parser); + struct message *m = &messages[num_messages]; + + if (m->last_header_element != FIELD) + m->num_headers++; + + strlncat(m->headers[m->num_headers-1][0], + sizeof(m->headers[m->num_headers-1][0]), + buf, + len); + + m->last_header_element = FIELD; + + return 0; +} + +int +header_value_cb (http_parser *p, const char *buf, size_t len) +{ + assert(p == &parser); + struct message *m = &messages[num_messages]; + + strlncat(m->headers[m->num_headers-1][1], + sizeof(m->headers[m->num_headers-1][1]), + buf, + len); + + m->last_header_element = VALUE; + + return 0; +} + +void +check_body_is_final (const http_parser *p) +{ + if (messages[num_messages].body_is_final) { + fprintf(stderr, "\n\n *** Error http_body_is_final() should return 1 " + "on last on_body callback call " + "but it doesn't! 
***\n\n"); + assert(0); + abort(); + } + messages[num_messages].body_is_final = http_body_is_final(p); +} + +int +body_cb (http_parser *p, const char *buf, size_t len) +{ + assert(p == &parser); + strlncat(messages[num_messages].body, + sizeof(messages[num_messages].body), + buf, + len); + messages[num_messages].body_size += len; + check_body_is_final(p); + // printf("body_cb: '%s'\n", requests[num_messages].body); + return 0; +} + +int +count_body_cb (http_parser *p, const char *buf, size_t len) +{ + assert(p == &parser); + assert(buf); + messages[num_messages].body_size += len; + check_body_is_final(p); + return 0; +} + +int +message_begin_cb (http_parser *p) +{ + assert(p == &parser); + assert(!messages[num_messages].message_begin_cb_called); + messages[num_messages].message_begin_cb_called = TRUE; + return 0; +} + +int +headers_complete_cb (http_parser *p) +{ + assert(p == &parser); + messages[num_messages].method = parser.method; + messages[num_messages].status_code = parser.status_code; + messages[num_messages].http_major = parser.http_major; + messages[num_messages].http_minor = parser.http_minor; + messages[num_messages].headers_complete_cb_called = TRUE; + messages[num_messages].should_keep_alive = http_should_keep_alive(&parser); + return 0; +} + +int +message_complete_cb (http_parser *p) +{ + assert(p == &parser); + if (messages[num_messages].should_keep_alive != + http_should_keep_alive(&parser)) + { + fprintf(stderr, "\n\n *** Error http_should_keep_alive() should have same " + "value in both on_message_complete and on_headers_complete " + "but it doesn't! ***\n\n"); + assert(0); + abort(); + } + + if (messages[num_messages].body_size && + http_body_is_final(p) && + !messages[num_messages].body_is_final) + { + fprintf(stderr, "\n\n *** Error http_body_is_final() should return 1 " + "on last on_body callback call " + "but it doesn't! 
***\n\n"); + assert(0); + abort(); + } + + messages[num_messages].message_complete_cb_called = TRUE; + + messages[num_messages].message_complete_on_eof = currently_parsing_eof; + + num_messages++; + return 0; +} + +int +response_status_cb (http_parser *p, const char *buf, size_t len) +{ + assert(p == &parser); + + messages[num_messages].status_cb_called = TRUE; + + strlncat(messages[num_messages].response_status, + sizeof(messages[num_messages].response_status), + buf, + len); + return 0; +} + +int +chunk_header_cb (http_parser *p) +{ + assert(p == &parser); + int chunk_idx = messages[num_messages].num_chunks; + messages[num_messages].num_chunks++; + if (chunk_idx < MAX_CHUNKS) { + messages[num_messages].chunk_lengths[chunk_idx] = p->content_length; + } + + return 0; +} + +int +chunk_complete_cb (http_parser *p) +{ + assert(p == &parser); + + /* Here we want to verify that each chunk_header_cb is matched by a + * chunk_complete_cb, so not only should the total number of calls to + * both callbacks be the same, but they also should be interleaved + * properly */ + assert(messages[num_messages].num_chunks == + messages[num_messages].num_chunks_complete + 1); + + messages[num_messages].num_chunks_complete++; + return 0; +} + +/* These dontcall_* callbacks exist so that we can verify that when we're + * paused, no additional callbacks are invoked */ +int +dontcall_message_begin_cb (http_parser *p) +{ + if (p) { } // gcc + fprintf(stderr, "\n\n*** on_message_begin() called on paused parser ***\n\n"); + abort(); +} + +int +dontcall_header_field_cb (http_parser *p, const char *buf, size_t len) +{ + if (p || buf || len) { } // gcc + fprintf(stderr, "\n\n*** on_header_field() called on paused parser ***\n\n"); + abort(); +} + +int +dontcall_header_value_cb (http_parser *p, const char *buf, size_t len) +{ + if (p || buf || len) { } // gcc + fprintf(stderr, "\n\n*** on_header_value() called on paused parser ***\n\n"); + abort(); +} + +int +dontcall_request_url_cb (http_parser *p, const char *buf, size_t len) +{ + if (p || buf || len) { } // gcc + fprintf(stderr, "\n\n*** on_request_url() called on paused parser ***\n\n"); + abort(); +} + +int +dontcall_body_cb (http_parser *p, const char *buf, size_t len) +{ + if (p || buf || len) { } // gcc + fprintf(stderr, "\n\n*** on_body_cb() called on paused parser ***\n\n"); + abort(); +} + +int +dontcall_headers_complete_cb (http_parser *p) +{ + if (p) { } // gcc + fprintf(stderr, "\n\n*** on_headers_complete() called on paused " + "parser ***\n\n"); + abort(); +} + +int +dontcall_message_complete_cb (http_parser *p) +{ + if (p) { } // gcc + fprintf(stderr, "\n\n*** on_message_complete() called on paused " + "parser ***\n\n"); + abort(); +} + +int +dontcall_response_status_cb (http_parser *p, const char *buf, size_t len) +{ + if (p || buf || len) { } // gcc + fprintf(stderr, "\n\n*** on_status() called on paused parser ***\n\n"); + abort(); +} + +int +dontcall_chunk_header_cb (http_parser *p) +{ + if (p) { } // gcc + fprintf(stderr, "\n\n*** on_chunk_header() called on paused parser ***\n\n"); + exit(1); +} + +int +dontcall_chunk_complete_cb (http_parser *p) +{ + if (p) { } // gcc + fprintf(stderr, "\n\n*** on_chunk_complete() " + "called on paused parser ***\n\n"); + exit(1); +} + +static http_parser_settings settings_dontcall = + {.on_message_begin = dontcall_message_begin_cb + ,.on_header_field = dontcall_header_field_cb + ,.on_header_value = dontcall_header_value_cb + ,.on_url = dontcall_request_url_cb + ,.on_status = dontcall_response_status_cb + 
,.on_body = dontcall_body_cb + ,.on_headers_complete = dontcall_headers_complete_cb + ,.on_message_complete = dontcall_message_complete_cb + ,.on_chunk_header = dontcall_chunk_header_cb + ,.on_chunk_complete = dontcall_chunk_complete_cb + }; + +/* These pause_* callbacks always pause the parser and just invoke the regular + * callback that tracks content. Before returning, we overwrite the parser + * settings to point to the _dontcall variety so that we can verify that + * the pause actually did, you know, pause. */ +int +pause_message_begin_cb (http_parser *p) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return message_begin_cb(p); +} + +int +pause_header_field_cb (http_parser *p, const char *buf, size_t len) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return header_field_cb(p, buf, len); +} + +int +pause_header_value_cb (http_parser *p, const char *buf, size_t len) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return header_value_cb(p, buf, len); +} + +int +pause_request_url_cb (http_parser *p, const char *buf, size_t len) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return request_url_cb(p, buf, len); +} + +int +pause_body_cb (http_parser *p, const char *buf, size_t len) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return body_cb(p, buf, len); +} + +int +pause_headers_complete_cb (http_parser *p) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return headers_complete_cb(p); +} + +int +pause_message_complete_cb (http_parser *p) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return message_complete_cb(p); +} + +int +pause_response_status_cb (http_parser *p, const char *buf, size_t len) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return response_status_cb(p, buf, len); +} + +int +pause_chunk_header_cb (http_parser *p) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return chunk_header_cb(p); +} + +int +pause_chunk_complete_cb (http_parser *p) +{ + http_parser_pause(p, 1); + *current_pause_parser = settings_dontcall; + return chunk_complete_cb(p); +} + +int +connect_headers_complete_cb (http_parser *p) +{ + headers_complete_cb(p); + return 1; +} + +int +connect_message_complete_cb (http_parser *p) +{ + messages[num_messages].should_keep_alive = http_should_keep_alive(&parser); + return message_complete_cb(p); +} + +static http_parser_settings settings_pause = + {.on_message_begin = pause_message_begin_cb + ,.on_header_field = pause_header_field_cb + ,.on_header_value = pause_header_value_cb + ,.on_url = pause_request_url_cb + ,.on_status = pause_response_status_cb + ,.on_body = pause_body_cb + ,.on_headers_complete = pause_headers_complete_cb + ,.on_message_complete = pause_message_complete_cb + ,.on_chunk_header = pause_chunk_header_cb + ,.on_chunk_complete = pause_chunk_complete_cb + }; + +static http_parser_settings settings = + {.on_message_begin = message_begin_cb + ,.on_header_field = header_field_cb + ,.on_header_value = header_value_cb + ,.on_url = request_url_cb + ,.on_status = response_status_cb + ,.on_body = body_cb + ,.on_headers_complete = headers_complete_cb + ,.on_message_complete = message_complete_cb + ,.on_chunk_header = chunk_header_cb + ,.on_chunk_complete = chunk_complete_cb + }; + +static http_parser_settings settings_count_body = + {.on_message_begin = message_begin_cb + ,.on_header_field = 
header_field_cb + ,.on_header_value = header_value_cb + ,.on_url = request_url_cb + ,.on_status = response_status_cb + ,.on_body = count_body_cb + ,.on_headers_complete = headers_complete_cb + ,.on_message_complete = message_complete_cb + ,.on_chunk_header = chunk_header_cb + ,.on_chunk_complete = chunk_complete_cb + }; + +static http_parser_settings settings_connect = + {.on_message_begin = message_begin_cb + ,.on_header_field = header_field_cb + ,.on_header_value = header_value_cb + ,.on_url = request_url_cb + ,.on_status = response_status_cb + ,.on_body = dontcall_body_cb + ,.on_headers_complete = connect_headers_complete_cb + ,.on_message_complete = connect_message_complete_cb + ,.on_chunk_header = chunk_header_cb + ,.on_chunk_complete = chunk_complete_cb + }; + +static http_parser_settings settings_null = + {.on_message_begin = 0 + ,.on_header_field = 0 + ,.on_header_value = 0 + ,.on_url = 0 + ,.on_status = 0 + ,.on_body = 0 + ,.on_headers_complete = 0 + ,.on_message_complete = 0 + ,.on_chunk_header = 0 + ,.on_chunk_complete = 0 + }; + +void +parser_init (enum http_parser_type type) +{ + num_messages = 0; + http_parser_init(&parser, type); + memset(&messages, 0, sizeof messages); +} + +size_t parse (const char *buf, size_t len) +{ + size_t nparsed; + currently_parsing_eof = (len == 0); + nparsed = http_parser_execute(&parser, &settings, buf, len); + return nparsed; +} + +size_t parse_count_body (const char *buf, size_t len) +{ + size_t nparsed; + currently_parsing_eof = (len == 0); + nparsed = http_parser_execute(&parser, &settings_count_body, buf, len); + return nparsed; +} + +size_t parse_pause (const char *buf, size_t len) +{ + size_t nparsed; + http_parser_settings s = settings_pause; + + currently_parsing_eof = (len == 0); + current_pause_parser = &s; + nparsed = http_parser_execute(&parser, current_pause_parser, buf, len); + return nparsed; +} + +size_t parse_connect (const char *buf, size_t len) +{ + size_t nparsed; + currently_parsing_eof = (len == 0); + nparsed = http_parser_execute(&parser, &settings_connect, buf, len); + return nparsed; +} + +static inline int +check_str_eq (const struct message *m, + const char *prop, + const char *expected, + const char *found) { + if ((expected == NULL) != (found == NULL)) { + printf("\n*** Error: %s in '%s' ***\n\n", prop, m->name); + printf("expected %s\n", (expected == NULL) ? "NULL" : expected); + printf(" found %s\n", (found == NULL) ? 
"NULL" : found); + return 0; + } + if (expected != NULL && 0 != strcmp(expected, found)) { + printf("\n*** Error: %s in '%s' ***\n\n", prop, m->name); + printf("expected '%s'\n", expected); + printf(" found '%s'\n", found); + return 0; + } + return 1; +} + +static inline int +check_num_eq (const struct message *m, + const char *prop, + int expected, + int found) { + if (expected != found) { + printf("\n*** Error: %s in '%s' ***\n\n", prop, m->name); + printf("expected %d\n", expected); + printf(" found %d\n", found); + return 0; + } + return 1; +} + +#define MESSAGE_CHECK_STR_EQ(expected, found, prop) \ + if (!check_str_eq(expected, #prop, expected->prop, found->prop)) return 0 + +#define MESSAGE_CHECK_NUM_EQ(expected, found, prop) \ + if (!check_num_eq(expected, #prop, expected->prop, found->prop)) return 0 + +#define MESSAGE_CHECK_URL_EQ(u, expected, found, prop, fn) \ +do { \ + char ubuf[256]; \ + \ + if ((u)->field_set & (1 << (fn))) { \ + memcpy(ubuf, (found)->request_url + (u)->field_data[(fn)].off, \ + (u)->field_data[(fn)].len); \ + ubuf[(u)->field_data[(fn)].len] = '\0'; \ + } else { \ + ubuf[0] = '\0'; \ + } \ + \ + check_str_eq(expected, #prop, expected->prop, ubuf); \ +} while(0) + +int +message_eq (int index, int connect, const struct message *expected) +{ + int i; + struct message *m = &messages[index]; + + MESSAGE_CHECK_NUM_EQ(expected, m, http_major); + MESSAGE_CHECK_NUM_EQ(expected, m, http_minor); + + if (expected->type == HTTP_REQUEST) { + MESSAGE_CHECK_NUM_EQ(expected, m, method); + } else { + MESSAGE_CHECK_NUM_EQ(expected, m, status_code); + MESSAGE_CHECK_STR_EQ(expected, m, response_status); + assert(m->status_cb_called); + } + + if (!connect) { + MESSAGE_CHECK_NUM_EQ(expected, m, should_keep_alive); + MESSAGE_CHECK_NUM_EQ(expected, m, message_complete_on_eof); + } + + assert(m->message_begin_cb_called); + assert(m->headers_complete_cb_called); + assert(m->message_complete_cb_called); + + + MESSAGE_CHECK_STR_EQ(expected, m, request_url); + + /* Check URL components; we can't do this w/ CONNECT since it doesn't + * send us a well-formed URL. + */ + if (*m->request_url && m->method != HTTP_CONNECT) { + struct http_parser_url u; + + if (http_parser_parse_url(m->request_url, strlen(m->request_url), 0, &u)) { + fprintf(stderr, "\n\n*** failed to parse URL %s ***\n\n", + m->request_url); + abort(); + } + + if (expected->host) { + MESSAGE_CHECK_URL_EQ(&u, expected, m, host, UF_HOST); + } + + if (expected->userinfo) { + MESSAGE_CHECK_URL_EQ(&u, expected, m, userinfo, UF_USERINFO); + } + + m->port = (u.field_set & (1 << UF_PORT)) ? 
+ u.port : 0; + + MESSAGE_CHECK_URL_EQ(&u, expected, m, query_string, UF_QUERY); + MESSAGE_CHECK_URL_EQ(&u, expected, m, fragment, UF_FRAGMENT); + MESSAGE_CHECK_URL_EQ(&u, expected, m, request_path, UF_PATH); + MESSAGE_CHECK_NUM_EQ(expected, m, port); + } + + if (connect) { + check_num_eq(m, "body_size", 0, m->body_size); + } else if (expected->body_size) { + MESSAGE_CHECK_NUM_EQ(expected, m, body_size); + } else { + MESSAGE_CHECK_STR_EQ(expected, m, body); + } + + if (connect) { + check_num_eq(m, "num_chunks_complete", 0, m->num_chunks_complete); + } else { + assert(m->num_chunks == m->num_chunks_complete); + MESSAGE_CHECK_NUM_EQ(expected, m, num_chunks_complete); + for (i = 0; i < m->num_chunks && i < MAX_CHUNKS; i++) { + MESSAGE_CHECK_NUM_EQ(expected, m, chunk_lengths[i]); + } + } + + MESSAGE_CHECK_NUM_EQ(expected, m, num_headers); + + int r; + for (i = 0; i < m->num_headers; i++) { + r = check_str_eq(expected, "header field", expected->headers[i][0], m->headers[i][0]); + if (!r) return 0; + r = check_str_eq(expected, "header value", expected->headers[i][1], m->headers[i][1]); + if (!r) return 0; + } + + if (!connect) { + MESSAGE_CHECK_STR_EQ(expected, m, upgrade); + } + + return 1; +} + +/* Given a sequence of varargs messages, return the number of them that the + * parser should successfully parse, taking into account that upgraded + * messages prevent all subsequent messages from being parsed. + */ +size_t +count_parsed_messages(const size_t nmsgs, ...) { + size_t i; + va_list ap; + + va_start(ap, nmsgs); + + for (i = 0; i < nmsgs; i++) { + struct message *m = va_arg(ap, struct message *); + + if (m->upgrade) { + va_end(ap); + return i + 1; + } + } + + va_end(ap); + return nmsgs; +} + +/* Given a sequence of bytes and the number of these that we were able to + * parse, verify that upgrade bodies are correct. + */ +void +upgrade_message_fix(char *body, const size_t nread, const size_t nmsgs, ...) 
{ + va_list ap; + size_t i; + size_t off = 0; + + va_start(ap, nmsgs); + + for (i = 0; i < nmsgs; i++) { + struct message *m = va_arg(ap, struct message *); + + off += strlen(m->raw); + + if (m->upgrade) { + off -= strlen(m->upgrade); + + /* Check the portion of the response after its specified upgrade */ + if (!check_str_eq(m, "upgrade", body + off, body + nread)) { + abort(); + } + + /* Fix up the response so that message_eq() will verify the beginning + * of the upgrade */ + *(body + nread + strlen(m->upgrade)) = '\0'; + messages[num_messages -1 ].upgrade = body + nread; + + va_end(ap); + return; + } + } + + va_end(ap); + printf("\n\n*** Error: expected a message with upgrade ***\n"); + + abort(); +} + +static void +print_error (const char *raw, size_t error_location) +{ + fprintf(stderr, "\n*** %s ***\n\n", + http_errno_description(HTTP_PARSER_ERRNO(&parser))); + + int this_line = 0, char_len = 0; + size_t i, j, len = strlen(raw), error_location_line = 0; + for (i = 0; i < len; i++) { + if (i == error_location) this_line = 1; + switch (raw[i]) { + case '\r': + char_len = 2; + fprintf(stderr, "\\r"); + break; + + case '\n': + fprintf(stderr, "\\n\n"); + + if (this_line) goto print; + + error_location_line = 0; + continue; + + default: + char_len = 1; + fputc(raw[i], stderr); + break; + } + if (!this_line) error_location_line += char_len; + } + + fprintf(stderr, "[eof]\n"); + + print: + for (j = 0; j < error_location_line; j++) { + fputc(' ', stderr); + } + fprintf(stderr, "^\n\nerror location: %u\n", (unsigned int)error_location); +} + +void +test_preserve_data (void) +{ + char my_data[] = "application-specific data"; + http_parser parser; + parser.data = my_data; + http_parser_init(&parser, HTTP_REQUEST); + if (parser.data != my_data) { + printf("\n*** parser.data not preserved accross http_parser_init ***\n\n"); + abort(); + } +} + +struct url_test { + const char *name; + const char *url; + int is_connect; + struct http_parser_url u; + int rv; +}; + +const struct url_test url_tests[] = +{ {.name="proxy request" + ,.url="http://hostname/" + ,.is_connect=0 + ,.u= + {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PATH) + ,.port=0 + ,.field_data= + {{ 0, 4 } /* UF_SCHEMA */ + ,{ 7, 8 } /* UF_HOST */ + ,{ 0, 0 } /* UF_PORT */ + ,{ 15, 1 } /* UF_PATH */ + ,{ 0, 0 } /* UF_QUERY */ + ,{ 0, 0 } /* UF_FRAGMENT */ + ,{ 0, 0 } /* UF_USERINFO */ + } + } + ,.rv=0 + } + +, {.name="proxy request with port" + ,.url="http://hostname:444/" + ,.is_connect=0 + ,.u= + {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PORT) | (1 << UF_PATH) + ,.port=444 + ,.field_data= + {{ 0, 4 } /* UF_SCHEMA */ + ,{ 7, 8 } /* UF_HOST */ + ,{ 16, 3 } /* UF_PORT */ + ,{ 19, 1 } /* UF_PATH */ + ,{ 0, 0 } /* UF_QUERY */ + ,{ 0, 0 } /* UF_FRAGMENT */ + ,{ 0, 0 } /* UF_USERINFO */ + } + } + ,.rv=0 + } + +, {.name="CONNECT request" + ,.url="hostname:443" + ,.is_connect=1 + ,.u= + {.field_set=(1 << UF_HOST) | (1 << UF_PORT) + ,.port=443 + ,.field_data= + {{ 0, 0 } /* UF_SCHEMA */ + ,{ 0, 8 } /* UF_HOST */ + ,{ 9, 3 } /* UF_PORT */ + ,{ 0, 0 } /* UF_PATH */ + ,{ 0, 0 } /* UF_QUERY */ + ,{ 0, 0 } /* UF_FRAGMENT */ + ,{ 0, 0 } /* UF_USERINFO */ + } + } + ,.rv=0 + } + +, {.name="CONNECT request but not connect" + ,.url="hostname:443" + ,.is_connect=0 + ,.rv=1 + } + +, {.name="proxy ipv6 request" + ,.url="http://[1:2::3:4]/" + ,.is_connect=0 + ,.u= + {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PATH) + ,.port=0 + ,.field_data= + {{ 0, 4 } /* UF_SCHEMA */ + ,{ 8, 8 } /* UF_HOST */ + ,{ 0, 0 } /* UF_PORT */ + 
,{ 17, 1 } /* UF_PATH */ + ,{ 0, 0 } /* UF_QUERY */ + ,{ 0, 0 } /* UF_FRAGMENT */ + ,{ 0, 0 } /* UF_USERINFO */ + } + } + ,.rv=0 + } + +, {.name="proxy ipv6 request with port" + ,.url="http://[1:2::3:4]:67/" + ,.is_connect=0 + ,.u= + {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PORT) | (1 << UF_PATH) + ,.port=67 + ,.field_data= + {{ 0, 4 } /* UF_SCHEMA */ + ,{ 8, 8 } /* UF_HOST */ + ,{ 18, 2 } /* UF_PORT */ + ,{ 20, 1 } /* UF_PATH */ + ,{ 0, 0 } /* UF_QUERY */ + ,{ 0, 0 } /* UF_FRAGMENT */ + ,{ 0, 0 } /* UF_USERINFO */ + } + } + ,.rv=0 + } + +, {.name="CONNECT ipv6 address" + ,.url="[1:2::3:4]:443" + ,.is_connect=1 + ,.u= + {.field_set=(1 << UF_HOST) | (1 << UF_PORT) + ,.port=443 + ,.field_data= + {{ 0, 0 } /* UF_SCHEMA */ + ,{ 1, 8 } /* UF_HOST */ + ,{ 11, 3 } /* UF_PORT */ + ,{ 0, 0 } /* UF_PATH */ + ,{ 0, 0 } /* UF_QUERY */ + ,{ 0, 0 } /* UF_FRAGMENT */ + ,{ 0, 0 } /* UF_USERINFO */ + } + } + ,.rv=0 + } + +, {.name="ipv4 in ipv6 address" + ,.url="http://[2001:0000:0000:0000:0000:0000:1.9.1.1]/" + ,.is_connect=0 + ,.u= + {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PATH) + ,.port=0 + ,.field_data= + {{ 0, 4 } /* UF_SCHEMA */ + ,{ 8, 37 } /* UF_HOST */ + ,{ 0, 0 } /* UF_PORT */ + ,{ 46, 1 } /* UF_PATH */ + ,{ 0, 0 } /* UF_QUERY */ + ,{ 0, 0 } /* UF_FRAGMENT */ + ,{ 0, 0 } /* UF_USERINFO */ + } + } + ,.rv=0 + } + +, {.name="extra ? in query string" + ,.url="http://a.tbcdn.cn/p/fp/2010c/??fp-header-min.css,fp-base-min.css," + "fp-channel-min.css,fp-product-min.css,fp-mall-min.css,fp-category-min.css," + "fp-sub-min.css,fp-gdp4p-min.css,fp-css3-min.css,fp-misc-min.css?t=20101022.css" + ,.is_connect=0 + ,.u= + {.field_set=(1<field_set, u->port); + for (i = 0; i < UF_MAX; i++) { + if ((u->field_set & (1 << i)) == 0) { + printf("\tfield_data[%u]: unset\n", i); + continue; + } + + printf("\tfield_data[%u]: off: %u len: %u part: \"%.*s\n\"", + i, + u->field_data[i].off, + u->field_data[i].len, + u->field_data[i].len, + url + u->field_data[i].off); + } +} + +void +test_parse_url (void) +{ + struct http_parser_url u; + const struct url_test *test; + unsigned int i; + int rv; + + for (i = 0; i < (sizeof(url_tests) / sizeof(url_tests[0])); i++) { + test = &url_tests[i]; + memset(&u, 0, sizeof(u)); + + rv = http_parser_parse_url(test->url, + test->url ? 
strlen(test->url) : 0, + test->is_connect, + &u); + + if (test->rv == 0) { + if (rv != 0) { + printf("\n*** http_parser_parse_url(\"%s\") \"%s\" test failed, " + "unexpected rv %d ***\n\n", test->url, test->name, rv); + abort(); + } + + if (memcmp(&u, &test->u, sizeof(u)) != 0) { + printf("\n*** http_parser_parse_url(\"%s\") \"%s\" failed ***\n", + test->url, test->name); + + printf("target http_parser_url:\n"); + dump_url(test->url, &test->u); + printf("result http_parser_url:\n"); + dump_url(test->url, &u); + + abort(); + } + } else { + /* test->rv != 0 */ + if (rv == 0) { + printf("\n*** http_parser_parse_url(\"%s\") \"%s\" test failed, " + "unexpected rv %d ***\n\n", test->url, test->name, rv); + abort(); + } + } + } +} + +void +test_method_str (void) +{ + assert(0 == strcmp("GET", http_method_str(HTTP_GET))); + assert(0 == strcmp("", http_method_str(1337))); +} + +void +test_status_str (void) +{ + assert(0 == strcmp("OK", http_status_str(HTTP_STATUS_OK))); + assert(0 == strcmp("Not Found", http_status_str(HTTP_STATUS_NOT_FOUND))); + assert(0 == strcmp("", http_status_str(1337))); +} + +void +test_message (const struct message *message) +{ + size_t raw_len = strlen(message->raw); + size_t msg1len; + for (msg1len = 0; msg1len < raw_len; msg1len++) { + parser_init(message->type); + + size_t read; + const char *msg1 = message->raw; + const char *msg2 = msg1 + msg1len; + size_t msg2len = raw_len - msg1len; + + if (msg1len) { + assert(num_messages == 0); + messages[0].headers_complete_cb_called = FALSE; + + read = parse(msg1, msg1len); + + if (!messages[0].headers_complete_cb_called && parser.nread != read) { + assert(parser.nread == read); + print_error(msg1, read); + abort(); + } + + if (message->upgrade && parser.upgrade && num_messages > 0) { + messages[num_messages - 1].upgrade = msg1 + read; + goto test; + } + + if (read != msg1len) { + print_error(msg1, read); + abort(); + } + } + + + read = parse(msg2, msg2len); + + if (message->upgrade && parser.upgrade) { + messages[num_messages - 1].upgrade = msg2 + read; + goto test; + } + + if (read != msg2len) { + print_error(msg2, read); + abort(); + } + + read = parse(NULL, 0); + + if (read != 0) { + print_error(message->raw, read); + abort(); + } + + test: + + if (num_messages != 1) { + printf("\n*** num_messages != 1 after testing '%s' ***\n\n", message->name); + abort(); + } + + if(!message_eq(0, 0, message)) abort(); + } +} + +void +test_message_count_body (const struct message *message) +{ + parser_init(message->type); + + size_t read; + size_t l = strlen(message->raw); + size_t i, toread; + size_t chunk = 4024; + + for (i = 0; i < l; i+= chunk) { + toread = MIN(l-i, chunk); + read = parse_count_body(message->raw + i, toread); + if (read != toread) { + print_error(message->raw, read); + abort(); + } + } + + + read = parse_count_body(NULL, 0); + if (read != 0) { + print_error(message->raw, read); + abort(); + } + + if (num_messages != 1) { + printf("\n*** num_messages != 1 after testing '%s' ***\n\n", message->name); + abort(); + } + + if(!message_eq(0, 0, message)) abort(); +} + +void +test_simple_type (const char *buf, + enum http_errno err_expected, + enum http_parser_type type) +{ + parser_init(type); + + enum http_errno err; + + parse(buf, strlen(buf)); + err = HTTP_PARSER_ERRNO(&parser); + parse(NULL, 0); + + /* In strict mode, allow us to pass with an unexpected HPE_STRICT as + * long as the caller isn't expecting success. 
+ */ +#if HTTP_PARSER_STRICT + if (err_expected != err && err_expected != HPE_OK && err != HPE_STRICT) { +#else + if (err_expected != err) { +#endif + fprintf(stderr, "\n*** test_simple expected %s, but saw %s ***\n\n%s\n", + http_errno_name(err_expected), http_errno_name(err), buf); + abort(); + } +} + +void +test_simple (const char *buf, enum http_errno err_expected) +{ + test_simple_type(buf, err_expected, HTTP_REQUEST); +} + +void +test_invalid_header_content (int req, const char* str) +{ + http_parser parser; + http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE); + size_t parsed; + const char *buf; + buf = req ? + "GET / HTTP/1.1\r\n" : + "HTTP/1.1 200 OK\r\n"; + parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf)); + assert(parsed == strlen(buf)); + + buf = str; + size_t buflen = strlen(buf); + + parsed = http_parser_execute(&parser, &settings_null, buf, buflen); + if (parsed != buflen) { + assert(HTTP_PARSER_ERRNO(&parser) == HPE_INVALID_HEADER_TOKEN); + return; + } + + fprintf(stderr, + "\n*** Error expected but none in invalid header content test ***\n"); + abort(); +} + +void +test_invalid_header_field_content_error (int req) +{ + test_invalid_header_content(req, "Foo: F\01ailure"); + test_invalid_header_content(req, "Foo: B\02ar"); +} + +void +test_invalid_header_field (int req, const char* str) +{ + http_parser parser; + http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE); + size_t parsed; + const char *buf; + buf = req ? + "GET / HTTP/1.1\r\n" : + "HTTP/1.1 200 OK\r\n"; + parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf)); + assert(parsed == strlen(buf)); + + buf = str; + size_t buflen = strlen(buf); + + parsed = http_parser_execute(&parser, &settings_null, buf, buflen); + if (parsed != buflen) { + assert(HTTP_PARSER_ERRNO(&parser) == HPE_INVALID_HEADER_TOKEN); + return; + } + + fprintf(stderr, + "\n*** Error expected but none in invalid header token test ***\n"); + abort(); +} + +void +test_invalid_header_field_token_error (int req) +{ + test_invalid_header_field(req, "Fo@: Failure"); + test_invalid_header_field(req, "Foo\01\test: Bar"); +} + +void +test_double_content_length_error (int req) +{ + http_parser parser; + http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE); + size_t parsed; + const char *buf; + buf = req ? + "GET / HTTP/1.1\r\n" : + "HTTP/1.1 200 OK\r\n"; + parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf)); + assert(parsed == strlen(buf)); + + buf = "Content-Length: 0\r\nContent-Length: 1\r\n\r\n"; + size_t buflen = strlen(buf); + + parsed = http_parser_execute(&parser, &settings_null, buf, buflen); + if (parsed != buflen) { + assert(HTTP_PARSER_ERRNO(&parser) == HPE_UNEXPECTED_CONTENT_LENGTH); + return; + } + + fprintf(stderr, + "\n*** Error expected but none in double content-length test ***\n"); + abort(); +} + +void +test_chunked_content_length_error (int req) +{ + http_parser parser; + http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE); + size_t parsed; + const char *buf; + buf = req ? 
+ "GET / HTTP/1.1\r\n" : + "HTTP/1.1 200 OK\r\n"; + parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf)); + assert(parsed == strlen(buf)); + + buf = "Transfer-Encoding: chunked\r\nContent-Length: 1\r\n\r\n"; + size_t buflen = strlen(buf); + + parsed = http_parser_execute(&parser, &settings_null, buf, buflen); + if (parsed != buflen) { + assert(HTTP_PARSER_ERRNO(&parser) == HPE_UNEXPECTED_CONTENT_LENGTH); + return; + } + + fprintf(stderr, + "\n*** Error expected but none in chunked content-length test ***\n"); + abort(); +} + +void +test_header_cr_no_lf_error (int req) +{ + http_parser parser; + http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE); + size_t parsed; + const char *buf; + buf = req ? + "GET / HTTP/1.1\r\n" : + "HTTP/1.1 200 OK\r\n"; + parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf)); + assert(parsed == strlen(buf)); + + buf = "Foo: 1\rBar: 1\r\n\r\n"; + size_t buflen = strlen(buf); + + parsed = http_parser_execute(&parser, &settings_null, buf, buflen); + if (parsed != buflen) { + assert(HTTP_PARSER_ERRNO(&parser) == HPE_LF_EXPECTED); + return; + } + + fprintf(stderr, + "\n*** Error expected but none in header whitespace test ***\n"); + abort(); +} + +void +test_no_overflow_parse_url (void) +{ + int rv; + struct http_parser_url u; + + http_parser_url_init(&u); + rv = http_parser_parse_url("http://example.com:8001", 22, 0, &u); + + if (rv != 0) { + fprintf(stderr, + "\n*** test_no_overflow_parse_url invalid return value=%d\n", + rv); + abort(); + } + + if (u.port != 800) { + fprintf(stderr, + "\n*** test_no_overflow_parse_url invalid port number=%d\n", + u.port); + abort(); + } +} + +void +test_header_overflow_error (int req) +{ + http_parser parser; + http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE); + size_t parsed; + const char *buf; + buf = req ? 
"GET / HTTP/1.1\r\n" : "HTTP/1.0 200 OK\r\n"; + parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf)); + assert(parsed == strlen(buf)); + + buf = "header-key: header-value\r\n"; + size_t buflen = strlen(buf); + + int i; + for (i = 0; i < 10000; i++) { + parsed = http_parser_execute(&parser, &settings_null, buf, buflen); + if (parsed != buflen) { + //fprintf(stderr, "error found on iter %d\n", i); + assert(HTTP_PARSER_ERRNO(&parser) == HPE_HEADER_OVERFLOW); + return; + } + } + + fprintf(stderr, "\n*** Error expected but none in header overflow test ***\n"); + abort(); +} + + +void +test_header_nread_value () +{ + http_parser parser; + http_parser_init(&parser, HTTP_REQUEST); + size_t parsed; + const char *buf; + buf = "GET / HTTP/1.1\r\nheader: value\nhdr: value\r\n"; + parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf)); + assert(parsed == strlen(buf)); + + assert(parser.nread == strlen(buf)); +} + + +static void +test_content_length_overflow (const char *buf, size_t buflen, int expect_ok) +{ + http_parser parser; + http_parser_init(&parser, HTTP_RESPONSE); + http_parser_execute(&parser, &settings_null, buf, buflen); + + if (expect_ok) + assert(HTTP_PARSER_ERRNO(&parser) == HPE_OK); + else + assert(HTTP_PARSER_ERRNO(&parser) == HPE_INVALID_CONTENT_LENGTH); +} + +void +test_header_content_length_overflow_error (void) +{ +#define X(size) \ + "HTTP/1.1 200 OK\r\n" \ + "Content-Length: " #size "\r\n" \ + "\r\n" + const char a[] = X(1844674407370955160); /* 2^64 / 10 - 1 */ + const char b[] = X(18446744073709551615); /* 2^64-1 */ + const char c[] = X(18446744073709551616); /* 2^64 */ +#undef X + test_content_length_overflow(a, sizeof(a) - 1, 1); /* expect ok */ + test_content_length_overflow(b, sizeof(b) - 1, 0); /* expect failure */ + test_content_length_overflow(c, sizeof(c) - 1, 0); /* expect failure */ +} + +void +test_chunk_content_length_overflow_error (void) +{ +#define X(size) \ + "HTTP/1.1 200 OK\r\n" \ + "Transfer-Encoding: chunked\r\n" \ + "\r\n" \ + #size "\r\n" \ + "..." + const char a[] = X(FFFFFFFFFFFFFFE); /* 2^64 / 16 - 1 */ + const char b[] = X(FFFFFFFFFFFFFFFF); /* 2^64-1 */ + const char c[] = X(10000000000000000); /* 2^64 */ +#undef X + test_content_length_overflow(a, sizeof(a) - 1, 1); /* expect ok */ + test_content_length_overflow(b, sizeof(b) - 1, 0); /* expect failure */ + test_content_length_overflow(c, sizeof(c) - 1, 0); /* expect failure */ +} + +void +test_no_overflow_long_body (int req, size_t length) +{ + http_parser parser; + http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE); + size_t parsed; + size_t i; + char buf1[3000]; + size_t buf1len = sprintf(buf1, "%s\r\nConnection: Keep-Alive\r\nContent-Length: %lu\r\n\r\n", + req ? "POST / HTTP/1.0" : "HTTP/1.0 200 OK", (unsigned long)length); + parsed = http_parser_execute(&parser, &settings_null, buf1, buf1len); + if (parsed != buf1len) + goto err; + + for (i = 0; i < length; i++) { + char foo = 'a'; + parsed = http_parser_execute(&parser, &settings_null, &foo, 1); + if (parsed != 1) + goto err; + } + + parsed = http_parser_execute(&parser, &settings_null, buf1, buf1len); + if (parsed != buf1len) goto err; + return; + + err: + fprintf(stderr, + "\n*** error in test_no_overflow_long_body %s of length %lu ***\n", + req ? 
"REQUEST" : "RESPONSE", + (unsigned long)length); + abort(); +} + +void +test_multiple3 (const struct message *r1, const struct message *r2, const struct message *r3) +{ + int message_count = count_parsed_messages(3, r1, r2, r3); + + char total[ strlen(r1->raw) + + strlen(r2->raw) + + strlen(r3->raw) + + 1 + ]; + total[0] = '\0'; + + strcat(total, r1->raw); + strcat(total, r2->raw); + strcat(total, r3->raw); + + parser_init(r1->type); + + size_t read; + + read = parse(total, strlen(total)); + + if (parser.upgrade) { + upgrade_message_fix(total, read, 3, r1, r2, r3); + goto test; + } + + if (read != strlen(total)) { + print_error(total, read); + abort(); + } + + read = parse(NULL, 0); + + if (read != 0) { + print_error(total, read); + abort(); + } + +test: + + if (message_count != num_messages) { + fprintf(stderr, "\n\n*** Parser didn't see 3 messages only %d *** \n", num_messages); + abort(); + } + + if (!message_eq(0, 0, r1)) abort(); + if (message_count > 1 && !message_eq(1, 0, r2)) abort(); + if (message_count > 2 && !message_eq(2, 0, r3)) abort(); +} + +/* SCAN through every possible breaking to make sure the + * parser can handle getting the content in any chunks that + * might come from the socket + */ +void +test_scan (const struct message *r1, const struct message *r2, const struct message *r3) +{ + char total[80*1024] = "\0"; + char buf1[80*1024] = "\0"; + char buf2[80*1024] = "\0"; + char buf3[80*1024] = "\0"; + + strcat(total, r1->raw); + strcat(total, r2->raw); + strcat(total, r3->raw); + + size_t read; + + int total_len = strlen(total); + + int total_ops = 2 * (total_len - 1) * (total_len - 2) / 2; + int ops = 0 ; + + size_t buf1_len, buf2_len, buf3_len; + int message_count = count_parsed_messages(3, r1, r2, r3); + + int i,j,type_both; + for (type_both = 0; type_both < 2; type_both ++ ) { + for (j = 2; j < total_len; j ++ ) { + for (i = 1; i < j; i ++ ) { + + if (ops % 1000 == 0) { + printf("\b\b\b\b%3.0f%%", 100 * (float)ops /(float)total_ops); + fflush(stdout); + } + ops += 1; + + parser_init(type_both ? 
HTTP_BOTH : r1->type); + + buf1_len = i; + strlncpy(buf1, sizeof(buf1), total, buf1_len); + buf1[buf1_len] = 0; + + buf2_len = j - i; + strlncpy(buf2, sizeof(buf1), total+i, buf2_len); + buf2[buf2_len] = 0; + + buf3_len = total_len - j; + strlncpy(buf3, sizeof(buf1), total+j, buf3_len); + buf3[buf3_len] = 0; + + assert(num_messages == 0); + messages[0].headers_complete_cb_called = FALSE; + + read = parse(buf1, buf1_len); + + if (!messages[0].headers_complete_cb_called && parser.nread != read) { + print_error(buf1, read); + goto error; + } + + if (parser.upgrade) goto test; + + if (read != buf1_len) { + print_error(buf1, read); + goto error; + } + + read += parse(buf2, buf2_len); + + if (parser.upgrade) goto test; + + if (read != buf1_len + buf2_len) { + print_error(buf2, read); + goto error; + } + + read += parse(buf3, buf3_len); + + if (parser.upgrade) goto test; + + if (read != buf1_len + buf2_len + buf3_len) { + print_error(buf3, read); + goto error; + } + + parse(NULL, 0); + +test: + if (parser.upgrade) { + upgrade_message_fix(total, read, 3, r1, r2, r3); + } + + if (message_count != num_messages) { + fprintf(stderr, "\n\nParser didn't see %d messages only %d\n", + message_count, num_messages); + goto error; + } + + if (!message_eq(0, 0, r1)) { + fprintf(stderr, "\n\nError matching messages[0] in test_scan.\n"); + goto error; + } + + if (message_count > 1 && !message_eq(1, 0, r2)) { + fprintf(stderr, "\n\nError matching messages[1] in test_scan.\n"); + goto error; + } + + if (message_count > 2 && !message_eq(2, 0, r3)) { + fprintf(stderr, "\n\nError matching messages[2] in test_scan.\n"); + goto error; + } + } + } + } + puts("\b\b\b\b100%"); + return; + + error: + fprintf(stderr, "i=%d j=%d\n", i, j); + fprintf(stderr, "buf1 (%u) %s\n\n", (unsigned int)buf1_len, buf1); + fprintf(stderr, "buf2 (%u) %s\n\n", (unsigned int)buf2_len , buf2); + fprintf(stderr, "buf3 (%u) %s\n", (unsigned int)buf3_len, buf3); + abort(); +} + +// user required to free the result +// string terminated by \0 +char * +create_large_chunked_message (int body_size_in_kb, const char* headers) +{ + int i; + size_t wrote = 0; + size_t headers_len = strlen(headers); + size_t bufsize = headers_len + (5+1024+2)*body_size_in_kb + 6; + char * buf = malloc(bufsize); + + memcpy(buf, headers, headers_len); + wrote += headers_len; + + for (i = 0; i < body_size_in_kb; i++) { + // write 1kb chunk into the body. + memcpy(buf + wrote, "400\r\n", 5); + wrote += 5; + memset(buf + wrote, 'C', 1024); + wrote += 1024; + strcpy(buf + wrote, "\r\n"); + wrote += 2; + } + + memcpy(buf + wrote, "0\r\n\r\n", 6); + wrote += 6; + assert(wrote == bufsize); + + return buf; +} + +/* Verify that we can pause parsing at any of the bytes in the + * message and still get the result that we're expecting. */ +void +test_message_pause (const struct message *msg) +{ + char *buf = (char*) msg->raw; + size_t buflen = strlen(msg->raw); + size_t nread; + + parser_init(msg->type); + + do { + nread = parse_pause(buf, buflen); + + // We can only set the upgrade buffer once we've gotten our message + // completion callback. 
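
The pause tests in this file rely on http_parser's pause contract: calling http_parser_pause(p, 1) from inside a callback makes http_parser_execute() stop early and report HPE_PAUSED, and the caller resumes by clearing the pause and re-feeding the unconsumed tail. A minimal, self-contained sketch of that driving loop, assuming only the public http_parser API (the helper names on_complete_pause and feed_with_pauses are illustrative and not part of the vendored test):

#include <string.h>
#include "http_parser.h"

/* Illustrative callback: pause the parser as soon as a message completes. */
static int on_complete_pause(http_parser *p)
{
  http_parser_pause(p, 1);
  return 0;
}

/* Feed `buf` to a paused-and-resumed parser until every byte is consumed
 * or a real error (anything other than HPE_PAUSED) is reported. */
static size_t feed_with_pauses(const char *buf, size_t len)
{
  http_parser p;
  http_parser_settings s;
  size_t done = 0;

  memset(&s, 0, sizeof(s));            /* unused callbacks stay NULL */
  s.on_message_complete = on_complete_pause;
  http_parser_init(&p, HTTP_REQUEST);

  while (done < len) {
    done += http_parser_execute(&p, &s, buf + done, len - done);
    if (HTTP_PARSER_ERRNO(&p) == HPE_PAUSED)
      http_parser_pause(&p, 0);        /* clear the pause, then re-feed the tail */
    else if (HTTP_PARSER_ERRNO(&p) != HPE_OK)
      break;                           /* genuine parse error */
  }
  return done;
}

This mirrors what test_message_pause() does with parse_pause(), except that the real test additionally swaps in the dontcall_* settings to prove that no further callbacks fire while the parser is paused.
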
+ if (messages[0].message_complete_cb_called && + msg->upgrade && + parser.upgrade) { + messages[0].upgrade = buf + nread; + goto test; + } + + if (nread < buflen) { + + // Not much do to if we failed a strict-mode check + if (HTTP_PARSER_ERRNO(&parser) == HPE_STRICT) { + return; + } + + assert (HTTP_PARSER_ERRNO(&parser) == HPE_PAUSED); + } + + buf += nread; + buflen -= nread; + http_parser_pause(&parser, 0); + } while (buflen > 0); + + nread = parse_pause(NULL, 0); + assert (nread == 0); + +test: + if (num_messages != 1) { + printf("\n*** num_messages != 1 after testing '%s' ***\n\n", msg->name); + abort(); + } + + if(!message_eq(0, 0, msg)) abort(); +} + +/* Verify that body and next message won't be parsed in responses to CONNECT */ +void +test_message_connect (const struct message *msg) +{ + char *buf = (char*) msg->raw; + size_t buflen = strlen(msg->raw); + + parser_init(msg->type); + + parse_connect(buf, buflen); + + if (num_messages != 1) { + printf("\n*** num_messages != 1 after testing '%s' ***\n\n", msg->name); + abort(); + } + + if(!message_eq(0, 1, msg)) abort(); +} + +int +main (void) +{ + unsigned i, j, k; + unsigned long version; + unsigned major; + unsigned minor; + unsigned patch; + + version = http_parser_version(); + major = (version >> 16) & 255; + minor = (version >> 8) & 255; + patch = version & 255; + printf("http_parser v%u.%u.%u (0x%06lx)\n", major, minor, patch, version); + + printf("sizeof(http_parser) = %u\n", (unsigned int)sizeof(http_parser)); + + //// API + test_preserve_data(); + test_parse_url(); + test_method_str(); + test_status_str(); + + //// NREAD + test_header_nread_value(); + + //// OVERFLOW CONDITIONS + test_no_overflow_parse_url(); + + test_header_overflow_error(HTTP_REQUEST); + test_no_overflow_long_body(HTTP_REQUEST, 1000); + test_no_overflow_long_body(HTTP_REQUEST, 100000); + + test_header_overflow_error(HTTP_RESPONSE); + test_no_overflow_long_body(HTTP_RESPONSE, 1000); + test_no_overflow_long_body(HTTP_RESPONSE, 100000); + + test_header_content_length_overflow_error(); + test_chunk_content_length_overflow_error(); + + //// HEADER FIELD CONDITIONS + test_double_content_length_error(HTTP_REQUEST); + test_chunked_content_length_error(HTTP_REQUEST); + test_header_cr_no_lf_error(HTTP_REQUEST); + test_invalid_header_field_token_error(HTTP_REQUEST); + test_invalid_header_field_content_error(HTTP_REQUEST); + test_double_content_length_error(HTTP_RESPONSE); + test_chunked_content_length_error(HTTP_RESPONSE); + test_header_cr_no_lf_error(HTTP_RESPONSE); + test_invalid_header_field_token_error(HTTP_RESPONSE); + test_invalid_header_field_content_error(HTTP_RESPONSE); + + test_simple_type( + "POST / HTTP/1.1\r\n" + "Content-Length:\r\n" // empty + "\r\n", + HPE_INVALID_CONTENT_LENGTH, + HTTP_REQUEST); + + test_simple_type( + "POST / HTTP/1.1\r\n" + "Content-Length: 42 \r\n" // Note the surrounding whitespace. 
+ "\r\n", + HPE_OK, + HTTP_REQUEST); + + test_simple_type( + "POST / HTTP/1.1\r\n" + "Content-Length: 4 2\r\n" + "\r\n", + HPE_INVALID_CONTENT_LENGTH, + HTTP_REQUEST); + + test_simple_type( + "POST / HTTP/1.1\r\n" + "Content-Length: 13 37\r\n" + "\r\n", + HPE_INVALID_CONTENT_LENGTH, + HTTP_REQUEST); + + test_simple_type( + "POST / HTTP/1.1\r\n" + "Content-Length: 42\r\n" + " Hello world!\r\n", + HPE_INVALID_CONTENT_LENGTH, + HTTP_REQUEST); + + test_simple_type( + "POST / HTTP/1.1\r\n" + "Content-Length: 42\r\n" + " \r\n", + HPE_OK, + HTTP_REQUEST); + + //// RESPONSES + + test_simple_type("HTP/1.1 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); + test_simple_type("HTTP/01.1 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); + test_simple_type("HTTP/11.1 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); + test_simple_type("HTTP/1.01 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); + test_simple_type("HTTP/1.1\t200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); + test_simple_type("\rHTTP/1.1\t200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE); + + for (i = 0; i < ARRAY_SIZE(responses); i++) { + test_message(&responses[i]); + } + + for (i = 0; i < ARRAY_SIZE(responses); i++) { + test_message_pause(&responses[i]); + } + + for (i = 0; i < ARRAY_SIZE(responses); i++) { + test_message_connect(&responses[i]); + } + + for (i = 0; i < ARRAY_SIZE(responses); i++) { + if (!responses[i].should_keep_alive) continue; + for (j = 0; j < ARRAY_SIZE(responses); j++) { + if (!responses[j].should_keep_alive) continue; + for (k = 0; k < ARRAY_SIZE(responses); k++) { + test_multiple3(&responses[i], &responses[j], &responses[k]); + } + } + } + + test_message_count_body(&responses[NO_HEADERS_NO_BODY_404]); + test_message_count_body(&responses[TRAILING_SPACE_ON_CHUNKED_BODY]); + + // test very large chunked response + { + char * msg = create_large_chunked_message(31337, + "HTTP/1.0 200 OK\r\n" + "Transfer-Encoding: chunked\r\n" + "Content-Type: text/plain\r\n" + "\r\n"); + struct message large_chunked = + {.name= "large chunked" + ,.type= HTTP_RESPONSE + ,.raw= msg + ,.should_keep_alive= FALSE + ,.message_complete_on_eof= FALSE + ,.http_major= 1 + ,.http_minor= 0 + ,.status_code= 200 + ,.response_status= "OK" + ,.num_headers= 2 + ,.headers= + { { "Transfer-Encoding", "chunked" } + , { "Content-Type", "text/plain" } + } + ,.body_size= 31337*1024 + ,.num_chunks_complete= 31338 + }; + for (i = 0; i < MAX_CHUNKS; i++) { + large_chunked.chunk_lengths[i] = 1024; + } + test_message_count_body(&large_chunked); + free(msg); + } + + + + printf("response scan 1/2 "); + test_scan( &responses[TRAILING_SPACE_ON_CHUNKED_BODY] + , &responses[NO_BODY_HTTP10_KA_204] + , &responses[NO_REASON_PHRASE] + ); + + printf("response scan 2/2 "); + test_scan( &responses[BONJOUR_MADAME_FR] + , &responses[UNDERSTORE_HEADER_KEY] + , &responses[NO_CARRIAGE_RET] + ); + + puts("responses okay"); + + + /// REQUESTS + + test_simple("GET / IHTTP/1.0\r\n\r\n", HPE_INVALID_CONSTANT); + test_simple("GET / ICE/1.0\r\n\r\n", HPE_INVALID_CONSTANT); + test_simple("GET / HTP/1.1\r\n\r\n", HPE_INVALID_VERSION); + test_simple("GET / HTTP/01.1\r\n\r\n", HPE_INVALID_VERSION); + test_simple("GET / HTTP/11.1\r\n\r\n", HPE_INVALID_VERSION); + test_simple("GET / HTTP/1.01\r\n\r\n", HPE_INVALID_VERSION); + + test_simple("GET / HTTP/1.0\r\nHello: w\1rld\r\n\r\n", HPE_INVALID_HEADER_TOKEN); + test_simple("GET / HTTP/1.0\r\nHello: woooo\2rld\r\n\r\n", HPE_INVALID_HEADER_TOKEN); + + // Extended characters - see 
nodejs/test/parallel/test-http-headers-obstext.js + test_simple("GET / HTTP/1.1\r\n" + "Test: Düsseldorf\r\n", + HPE_OK); + + // Well-formed but incomplete + test_simple("GET / HTTP/1.1\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 6\r\n" + "\r\n" + "fooba", + HPE_OK); + + static const char *all_methods[] = { + "DELETE", + "GET", + "HEAD", + "POST", + "PUT", + //"CONNECT", //CONNECT can't be tested like other methods, it's a tunnel + "OPTIONS", + "TRACE", + "COPY", + "LOCK", + "MKCOL", + "MOVE", + "PROPFIND", + "PROPPATCH", + "SEARCH", + "UNLOCK", + "BIND", + "REBIND", + "UNBIND", + "ACL", + "REPORT", + "MKACTIVITY", + "CHECKOUT", + "MERGE", + "M-SEARCH", + "NOTIFY", + "SUBSCRIBE", + "UNSUBSCRIBE", + "PATCH", + "PURGE", + "MKCALENDAR", + "LINK", + "UNLINK", + 0 }; + const char **this_method; + for (this_method = all_methods; *this_method; this_method++) { + char buf[200]; + sprintf(buf, "%s / HTTP/1.1\r\n\r\n", *this_method); + test_simple(buf, HPE_OK); + } + + static const char *bad_methods[] = { + "ASDF", + "C******", + "COLA", + "GEM", + "GETA", + "M****", + "MKCOLA", + "PROPPATCHA", + "PUN", + "PX", + "SA", + "hello world", + 0 }; + for (this_method = bad_methods; *this_method; this_method++) { + char buf[200]; + sprintf(buf, "%s / HTTP/1.1\r\n\r\n", *this_method); + test_simple(buf, HPE_INVALID_METHOD); + } + + // illegal header field name line folding + test_simple("GET / HTTP/1.1\r\n" + "name\r\n" + " : value\r\n" + "\r\n", + HPE_INVALID_HEADER_TOKEN); + + const char *dumbluck2 = + "GET / HTTP/1.1\r\n" + "X-SSL-Nonsense: -----BEGIN CERTIFICATE-----\r\n" + "\tMIIFbTCCBFWgAwIBAgICH4cwDQYJKoZIhvcNAQEFBQAwcDELMAkGA1UEBhMCVUsx\r\n" + "\tETAPBgNVBAoTCGVTY2llbmNlMRIwEAYDVQQLEwlBdXRob3JpdHkxCzAJBgNVBAMT\r\n" + "\tAkNBMS0wKwYJKoZIhvcNAQkBFh5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMu\r\n" + "\tdWswHhcNMDYwNzI3MTQxMzI4WhcNMDcwNzI3MTQxMzI4WjBbMQswCQYDVQQGEwJV\r\n" + "\tSzERMA8GA1UEChMIZVNjaWVuY2UxEzARBgNVBAsTCk1hbmNoZXN0ZXIxCzAJBgNV\r\n" + "\tBAcTmrsogriqMWLAk1DMRcwFQYDVQQDEw5taWNoYWVsIHBhcmQYJKoZIhvcNAQEB\r\n" + "\tBQADggEPADCCAQoCggEBANPEQBgl1IaKdSS1TbhF3hEXSl72G9J+WC/1R64fAcEF\r\n" + "\tW51rEyFYiIeZGx/BVzwXbeBoNUK41OK65sxGuflMo5gLflbwJtHBRIEKAfVVp3YR\r\n" + "\tgW7cMA/s/XKgL1GEC7rQw8lIZT8RApukCGqOVHSi/F1SiFlPDxuDfmdiNzL31+sL\r\n" + "\t0iwHDdNkGjy5pyBSB8Y79dsSJtCW/iaLB0/n8Sj7HgvvZJ7x0fr+RQjYOUUfrePP\r\n" + "\tu2MSpFyf+9BbC/aXgaZuiCvSR+8Snv3xApQY+fULK/xY8h8Ua51iXoQ5jrgu2SqR\r\n" + "\twgA7BUi3G8LFzMBl8FRCDYGUDy7M6QaHXx1ZWIPWNKsCAwEAAaOCAiQwggIgMAwG\r\n" + "\tA1UdEwEB/wQCMAAwEQYJYIZIAYb4QgHTTPAQDAgWgMA4GA1UdDwEB/wQEAwID6DAs\r\n" + "\tBglghkgBhvhCAQ0EHxYdVUsgZS1TY2llbmNlIFVzZXIgQ2VydGlmaWNhdGUwHQYD\r\n" + "\tVR0OBBYEFDTt/sf9PeMaZDHkUIldrDYMNTBZMIGaBgNVHSMEgZIwgY+AFAI4qxGj\r\n" + "\tloCLDdMVKwiljjDastqooXSkcjBwMQswCQYDVQQGEwJVSzERMA8GA1UEChMIZVNj\r\n" + "\taWVuY2UxEjAQBgNVBAsTCUF1dGhvcml0eTELMAkGA1UEAxMCQ0ExLTArBgkqhkiG\r\n" + "\t9w0BCQEWHmNhLW9wZXJhdG9yQGdyaWQtc3VwcG9ydC5hYy51a4IBADApBgNVHRIE\r\n" + "\tIjAggR5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMudWswGQYDVR0gBBIwEDAO\r\n" + "\tBgwrBgEEAdkvAQEBAQYwPQYJYIZIAYb4QgEEBDAWLmh0dHA6Ly9jYS5ncmlkLXN1\r\n" + "\tcHBvcnQuYWMudmT4sopwqlBWsvcHViL2NybC9jYWNybC5jcmwwPQYJYIZIAYb4QgEDBDAWLmh0\r\n" + "\tdHA6Ly9jYS5ncmlkLXN1cHBvcnQuYWMudWsvcHViL2NybC9jYWNybC5jcmwwPwYD\r\n" + "\tVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NhLmdyaWQt5hYy51ay9wdWIv\r\n" + "\tY3JsL2NhY3JsLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAS/U4iiooBENGW/Hwmmd3\r\n" + "\tXCy6Zrt08YjKCzGNjorT98g8uGsqYjSxv/hmi0qlnlHs+k/3Iobc3LjS5AMYr5L8\r\n" + "\tUO7OSkgFFlLHQyC9JzPfmLCAugvzEbyv4Olnsr8hbxF1MbKZoQxUZtMVu29wjfXk\r\n" + 
"\thTeApBv7eaKCWpSp7MCbvgzm74izKhu3vlDk9w6qVrxePfGgpKPqfHiOoGhFnbTK\r\n" + "\twTC6o2xq5y0qZ03JonF7OJspEd3I5zKY3E+ov7/ZhW6DqT8UFvsAdjvQbXyhV8Eu\r\n" + "\tYhixw1aKEPzNjNowuIseVogKOLXxWI5vAi5HgXdS0/ES5gDGsABo4fqovUKlgop3\r\n" + "\tRA==\r\n" + "\t-----END CERTIFICATE-----\r\n" + "\r\n"; + test_simple(dumbluck2, HPE_OK); + + const char *corrupted_connection = + "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Connection\r\033\065\325eep-Alive\r\n" + "Accept-Encoding: gzip\r\n" + "\r\n"; + test_simple(corrupted_connection, HPE_INVALID_HEADER_TOKEN); + + const char *corrupted_header_name = + "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "X-Some-Header\r\033\065\325eep-Alive\r\n" + "Accept-Encoding: gzip\r\n" + "\r\n"; + test_simple(corrupted_header_name, HPE_INVALID_HEADER_TOKEN); + +#if 0 + // NOTE(Wed Nov 18 11:57:27 CET 2009) this seems okay. we just read body + // until EOF. + // + // no content-length + // error if there is a body without content length + const char *bad_get_no_headers_no_body = "GET /bad_get_no_headers_no_body/world HTTP/1.1\r\n" + "Accept: */*\r\n" + "\r\n" + "HELLO"; + test_simple(bad_get_no_headers_no_body, 0); +#endif + /* TODO sending junk and large headers gets rejected */ + + + /* check to make sure our predefined requests are okay */ + for (i = 0; i < ARRAY_SIZE(requests); i++) { + test_message(&requests[i]); + } + + for (i = 0; i < ARRAY_SIZE(requests); i++) { + test_message_pause(&requests[i]); + } + + for (i = 0; i < ARRAY_SIZE(requests); i++) { + if (!requests[i].should_keep_alive) continue; + for (j = 0; j < ARRAY_SIZE(requests); j++) { + if (!requests[j].should_keep_alive) continue; + for (k = 0; k < ARRAY_SIZE(requests); k++) { + test_multiple3(&requests[i], &requests[j], &requests[k]); + } + } + } + + printf("request scan 1/4 "); + test_scan( &requests[GET_NO_HEADERS_NO_BODY] + , &requests[GET_ONE_HEADER_NO_BODY] + , &requests[GET_NO_HEADERS_NO_BODY] + ); + + printf("request scan 2/4 "); + test_scan( &requests[POST_CHUNKED_ALL_YOUR_BASE] + , &requests[POST_IDENTITY_BODY_WORLD] + , &requests[GET_FUNKY_CONTENT_LENGTH] + ); + + printf("request scan 3/4 "); + test_scan( &requests[TWO_CHUNKS_MULT_ZERO_END] + , &requests[CHUNKED_W_TRAILING_HEADERS] + , &requests[CHUNKED_W_NONSENSE_AFTER_LENGTH] + ); + + printf("request scan 4/4 "); + test_scan( &requests[QUERY_URL_WITH_QUESTION_MARK_GET] + , &requests[PREFIX_NEWLINE_GET ] + , &requests[CONNECT_REQUEST] + ); + + puts("requests okay"); + + return 0; +} diff --git a/src/third_party/minizip/Makefile.am b/src/third_party/minizip/Makefile.am new file mode 100644 index 000000000..d343011eb --- /dev/null +++ b/src/third_party/minizip/Makefile.am @@ -0,0 +1,45 @@ +lib_LTLIBRARIES = libminizip.la + +if COND_DEMOS +bin_PROGRAMS = miniunzip minizip +endif + +zlib_top_srcdir = $(top_srcdir)/../.. +zlib_top_builddir = $(top_builddir)/../.. 
+ +AM_CPPFLAGS = -I$(zlib_top_srcdir) +AM_LDFLAGS = -L$(zlib_top_builddir) + +if WIN32 +iowin32_src = iowin32.c +iowin32_h = iowin32.h +endif + +libminizip_la_SOURCES = \ + ioapi.c \ + mztools.c \ + unzip.c \ + zip.c \ + ${iowin32_src} + +libminizip_la_LDFLAGS = $(AM_LDFLAGS) -version-info 1:0:0 -lz + +minizip_includedir = $(includedir)/minizip +minizip_include_HEADERS = \ + crypt.h \ + ioapi.h \ + mztools.h \ + unzip.h \ + zip.h \ + ${iowin32_h} + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = minizip.pc + +EXTRA_PROGRAMS = miniunzip minizip + +miniunzip_SOURCES = miniunz.c +miniunzip_LDADD = libminizip.la + +minizip_SOURCES = minizip.c +minizip_LDADD = libminizip.la -lz diff --git a/src/third_party/minizip/MiniZip64_Changes.txt b/src/third_party/minizip/MiniZip64_Changes.txt new file mode 100644 index 000000000..13a1bd91a --- /dev/null +++ b/src/third_party/minizip/MiniZip64_Changes.txt @@ -0,0 +1,6 @@ + +MiniZip 1.1 was derrived from MiniZip at version 1.01f + +Change in 1.0 (Okt 2009) + - **TODO - Add history** + diff --git a/src/third_party/minizip/MiniZip64_info.txt b/src/third_party/minizip/MiniZip64_info.txt new file mode 100644 index 000000000..57d715242 --- /dev/null +++ b/src/third_party/minizip/MiniZip64_info.txt @@ -0,0 +1,74 @@ +MiniZip - Copyright (c) 1998-2010 - by Gilles Vollant - version 1.1 64 bits from Mathias Svensson + +Introduction +--------------------- +MiniZip 1.1 is built from MiniZip 1.0 by Gilles Vollant ( http://www.winimage.com/zLibDll/minizip.html ) + +When adding ZIP64 support into minizip it would result into risk of breaking compatibility with minizip 1.0. +All possible work was done for compatibility. + + +Background +--------------------- +When adding ZIP64 support Mathias Svensson found that Even Rouault have added ZIP64 +support for unzip.c into minizip for a open source project called gdal ( http://www.gdal.org/ ) + +That was used as a starting point. And after that ZIP64 support was added to zip.c +some refactoring and code cleanup was also done. + + +Changed from MiniZip 1.0 to MiniZip 1.1 +--------------------------------------- +* Added ZIP64 support for unzip ( by Even Rouault ) +* Added ZIP64 support for zip ( by Mathias Svensson ) +* Reverted some changed that Even Rouault did. +* Bunch of patches received from Gulles Vollant that he received for MiniZip from various users. +* Added unzip patch for BZIP Compression method (patch create by Daniel Borca) +* Added BZIP Compress method for zip +* Did some refactoring and code cleanup + + +Credits + + Gilles Vollant - Original MiniZip author + Even Rouault - ZIP64 unzip Support + Daniel Borca - BZip Compression method support in unzip + Mathias Svensson - ZIP64 zip support + Mathias Svensson - BZip Compression method support in zip + + Resources + + ZipLayout http://result42.com/projects/ZipFileLayout + Command line tool for Windows that shows the layout and information of the headers in a zip archive. + Used when debugging and validating the creation of zip files using MiniZip64 + + + ZIP App Note http://www.pkware.com/documents/casestudies/APPNOTE.TXT + Zip File specification + + +Notes. + * To be able to use BZip compression method in zip64.c or unzip64.c the BZIP2 lib is needed and HAVE_BZIP2 need to be defined. + +License +---------------------------------------------------------- + Condition of use and distribution are the same than zlib : + + This software is provided 'as-is', without any express or implied + warranty. 
In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + +---------------------------------------------------------- + diff --git a/src/third_party/minizip/README b/src/third_party/minizip/README new file mode 100644 index 000000000..d84ce1133 --- /dev/null +++ b/src/third_party/minizip/README @@ -0,0 +1,44 @@ +SHA: cacf7f1d4e3d44d871b605da3b647f07d718623f +URL: https://github.com/madler/zlib/tree/cacf7f1d4e3d44d871b605da3b647f07d718623f +Commit Date: 2017/01/15 +Patched for C90 compliant code: See c90_patch.diff + +git clone https://github.com/madler/zlib.git /tmp/zlib +cp -R /tmp/zlib/contrib/minizip . + +minizip/ by Gilles Vollant + Mini zip and unzip based on zlib + Includes Zip64 support by Mathias Svensson + See http://www.winimage.com/zLibDll/minizip.html + +Copyright notice: + + (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +If you use the zlib library in a product, we would appreciate *not* receiving +lengthy legal documents to sign. The sources are provided for free but without +warranty of any kind. The library has been entirely written by Jean-loup +Gailly and Mark Adler; it does not include third-party code. + +If you redistribute modified sources, we would appreciate that you include in +the file ChangeLog history information documenting your changes. Please read +the FAQ for more information on the distribution of modified source versions. diff --git a/src/third_party/minizip/c90_patch.diff b/src/third_party/minizip/c90_patch.diff new file mode 100644 index 000000000..0f0dc2e73 Binary files /dev/null and b/src/third_party/minizip/c90_patch.diff differ diff --git a/src/third_party/minizip/configure.ac b/src/third_party/minizip/configure.ac new file mode 100644 index 000000000..5b1197097 --- /dev/null +++ b/src/third_party/minizip/configure.ac @@ -0,0 +1,32 @@ +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. 
+ +AC_INIT([minizip], [1.2.11], [bugzilla.redhat.com]) +AC_CONFIG_SRCDIR([minizip.c]) +AM_INIT_AUTOMAKE([foreign]) +LT_INIT + +AC_MSG_CHECKING([whether to build example programs]) +AC_ARG_ENABLE([demos], AC_HELP_STRING([--enable-demos], [build example programs])) +AM_CONDITIONAL([COND_DEMOS], [test "$enable_demos" = yes]) +if test "$enable_demos" = yes +then + AC_MSG_RESULT([yes]) +else + AC_MSG_RESULT([no]) +fi + +case "${host}" in + *-mingw* | mingw*) + WIN32="yes" + ;; + *) + ;; +esac +AM_CONDITIONAL([WIN32], [test "${WIN32}" = "yes"]) + + +AC_SUBST([HAVE_UNISTD_H], [0]) +AC_CHECK_HEADER([unistd.h], [HAVE_UNISTD_H=1], []) +AC_CONFIG_FILES([Makefile minizip.pc]) +AC_OUTPUT diff --git a/src/third_party/minizip/crypt.h b/src/third_party/minizip/crypt.h new file mode 100644 index 000000000..3aa4a8f57 --- /dev/null +++ b/src/third_party/minizip/crypt.h @@ -0,0 +1,135 @@ +/* crypt.h -- base code for crypt/uncrypt ZIPfile + + + Version 1.01e, February 12th, 2005 + + Copyright (C) 1998-2005 Gilles Vollant + + This code is a modified version of crypting code in Infozip distribution + + The encryption/decryption parts of this source code (as opposed to the + non-echoing password parts) were originally written in Europe. The + whole source package can be freely distributed, including from the USA. + (Prior to January 2000, re-export from the US was a violation of US law.) + + This encryption code is a direct transcription of the algorithm from + Roger Schlafly, described by Phil Katz in the file appnote.txt. This + file (appnote.txt) is distributed with the PKZIP program (even in the + version without encryption capabilities). + + If you don't need crypting in your application, just define symbols + NOCRYPT and NOUNCRYPT. + + This code support the "Traditional PKWARE Encryption". + + The new AES encryption added on Zip format by Winzip (see the page + http://www.winzip.com/aes_info.htm ) and PKWare PKZip 5.x Strong + Encryption is not supported. +*/ + +#if ZLIB_VERNUM < 0x1270 +typedef unsigned long z_crc_t; +#endif + +#define CRC32(c, b) ((*(pcrc_32_tab+(((int)(c) ^ (b)) & 0xff))) ^ ((c) >> 8)) + +/*********************************************************************** + * Return the next byte in the pseudo-random sequence + */ +static int decrypt_byte(unsigned long* pkeys, const z_crc_t* pcrc_32_tab) +{ + unsigned temp; /* POTENTIAL BUG: temp*(temp^1) may overflow in an + * unpredictable manner on 16-bit systems; not a problem + * with any known compiler so far, though */ + + temp = ((unsigned)(*(pkeys+2)) & 0xffff) | 2; + return (int)(((temp * (temp ^ 1)) >> 8) & 0xff); +} + +/*********************************************************************** + * Update the encryption keys with the next byte of plain text + */ +static int update_keys(unsigned long* pkeys,const z_crc_t* pcrc_32_tab,int c) +{ + (*(pkeys+0)) = CRC32((*(pkeys+0)), c); + (*(pkeys+1)) += (*(pkeys+0)) & 0xff; + (*(pkeys+1)) = (*(pkeys+1)) * 134775813L + 1; + { + register int keyshift = (int)((*(pkeys+1)) >> 24); + (*(pkeys+2)) = CRC32((*(pkeys+2)), keyshift); + } + return c; +} + + +/*********************************************************************** + * Initialize the encryption keys and the random header according to + * the given password. 
+ */ +static void init_keys(const char* passwd,unsigned long* pkeys,const z_crc_t* pcrc_32_tab) +{ + *(pkeys+0) = 305419896L; + *(pkeys+1) = 591751049L; + *(pkeys+2) = 878082192L; + while (*passwd != '\0') { + update_keys(pkeys,pcrc_32_tab,(int)*passwd); + passwd++; + } +} + +#define zdecode(pkeys,pcrc_32_tab,c) \ + (update_keys(pkeys,pcrc_32_tab,c ^= decrypt_byte(pkeys,pcrc_32_tab))) + +#define zencode(pkeys,pcrc_32_tab,c,t) \ + (t=decrypt_byte(pkeys,pcrc_32_tab), update_keys(pkeys,pcrc_32_tab,c), t^(c)) + +#ifdef INCLUDECRYPTINGCODE_IFCRYPTALLOWED + +#define RAND_HEAD_LEN 12 + /* "last resort" source for second part of crypt seed pattern */ +# ifndef ZCR_SEED2 +# define ZCR_SEED2 3141592654UL /* use PI as default pattern */ +# endif + +static int crypthead(const char* passwd, /* password string */ + unsigned char* buf, /* where to write header */ + int bufSize, + unsigned long* pkeys, + const z_crc_t* pcrc_32_tab, + unsigned long crcForCrypting) +{ + int n; /* index in random header */ + int t; /* temporary */ + int c; /* random byte */ + unsigned char header[RAND_HEAD_LEN-2]; /* random header */ + static unsigned calls = 0; /* ensure different random header each time */ + + if (bufSize> 7) & 0xff; + header[n] = (unsigned char)zencode(pkeys, pcrc_32_tab, c, t); + } + /* Encrypt random header (last two bytes is high word of crc) */ + init_keys(passwd, pkeys, pcrc_32_tab); + for (n = 0; n < RAND_HEAD_LEN-2; n++) + { + buf[n] = (unsigned char)zencode(pkeys, pcrc_32_tab, header[n], t); + } + buf[n++] = (unsigned char)zencode(pkeys, pcrc_32_tab, (int)(crcForCrypting >> 16) & 0xff, t); + buf[n++] = (unsigned char)zencode(pkeys, pcrc_32_tab, (int)(crcForCrypting >> 24) & 0xff, t); + return n; +} + +#endif diff --git a/src/third_party/minizip/ioapi.c b/src/third_party/minizip/ioapi.c new file mode 100644 index 000000000..82299e02c --- /dev/null +++ b/src/third_party/minizip/ioapi.c @@ -0,0 +1,247 @@ +/* ioapi.h -- IO base function header for compress/uncompress .zip + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) + + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + +*/ + +#if defined(_WIN32) && (!(defined(_CRT_SECURE_NO_WARNINGS))) + #define _CRT_SECURE_NO_WARNINGS +#endif + +#if defined(__APPLE__) || defined(IOAPI_NO_64) +/* In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions */ +#define FOPEN_FUNC(filename, mode) fopen(filename, mode) +#define FTELLO_FUNC(stream) ftello(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko(stream, offset, origin) +#else +#define FOPEN_FUNC(filename, mode) fopen64(filename, mode) +#define FTELLO_FUNC(stream) ftello64(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko64(stream, offset, origin) +#endif + + +#include "ioapi.h" + +voidpf call_zopen64 (const zlib_filefunc64_32_def* pfilefunc,const void*filename,int mode) +{ + if (pfilefunc->zfile_func64.zopen64_file != NULL) + return (*(pfilefunc->zfile_func64.zopen64_file)) (pfilefunc->zfile_func64.opaque,filename,mode); + else + { + return (*(pfilefunc->zopen32_file))(pfilefunc->zfile_func64.opaque,(const char*)filename,mode); + } +} + +long call_zseek64 (const zlib_filefunc64_32_def* pfilefunc,voidpf filestream, ZPOS64_T offset, int origin) +{ + if (pfilefunc->zfile_func64.zseek64_file != NULL) + return 
(*(pfilefunc->zfile_func64.zseek64_file)) (pfilefunc->zfile_func64.opaque,filestream,offset,origin); + else + { + uLong offsetTruncated = (uLong)offset; + if (offsetTruncated != offset) + return -1; + else + return (*(pfilefunc->zseek32_file))(pfilefunc->zfile_func64.opaque,filestream,offsetTruncated,origin); + } +} + +ZPOS64_T call_ztell64 (const zlib_filefunc64_32_def* pfilefunc,voidpf filestream) +{ + if (pfilefunc->zfile_func64.zseek64_file != NULL) + return (*(pfilefunc->zfile_func64.ztell64_file)) (pfilefunc->zfile_func64.opaque,filestream); + else + { + uLong tell_uLong = (*(pfilefunc->ztell32_file))(pfilefunc->zfile_func64.opaque,filestream); + if ((tell_uLong) == MAXU32) + return (ZPOS64_T)-1; + else + return tell_uLong; + } +} + +void fill_zlib_filefunc64_32_def_from_filefunc32(zlib_filefunc64_32_def* p_filefunc64_32,const zlib_filefunc_def* p_filefunc32) +{ + p_filefunc64_32->zfile_func64.zopen64_file = NULL; + p_filefunc64_32->zopen32_file = p_filefunc32->zopen_file; + p_filefunc64_32->zfile_func64.zerror_file = p_filefunc32->zerror_file; + p_filefunc64_32->zfile_func64.zread_file = p_filefunc32->zread_file; + p_filefunc64_32->zfile_func64.zwrite_file = p_filefunc32->zwrite_file; + p_filefunc64_32->zfile_func64.ztell64_file = NULL; + p_filefunc64_32->zfile_func64.zseek64_file = NULL; + p_filefunc64_32->zfile_func64.zclose_file = p_filefunc32->zclose_file; + p_filefunc64_32->zfile_func64.zerror_file = p_filefunc32->zerror_file; + p_filefunc64_32->zfile_func64.opaque = p_filefunc32->opaque; + p_filefunc64_32->zseek32_file = p_filefunc32->zseek_file; + p_filefunc64_32->ztell32_file = p_filefunc32->ztell_file; +} + + + +static voidpf ZCALLBACK fopen_file_func OF((voidpf opaque, const char* filename, int mode)); +static uLong ZCALLBACK fread_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size)); +static uLong ZCALLBACK fwrite_file_func OF((voidpf opaque, voidpf stream, const void* buf,uLong size)); +static ZPOS64_T ZCALLBACK ftell64_file_func OF((voidpf opaque, voidpf stream)); +static long ZCALLBACK fseek64_file_func OF((voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)); +static int ZCALLBACK fclose_file_func OF((voidpf opaque, voidpf stream)); +static int ZCALLBACK ferror_file_func OF((voidpf opaque, voidpf stream)); + +static voidpf ZCALLBACK fopen_file_func (voidpf opaque, const char* filename, int mode) +{ + FILE* file = NULL; + const char* mode_fopen = NULL; + if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER)==ZLIB_FILEFUNC_MODE_READ) + mode_fopen = "rb"; + else + if (mode & ZLIB_FILEFUNC_MODE_EXISTING) + mode_fopen = "r+b"; + else + if (mode & ZLIB_FILEFUNC_MODE_CREATE) + mode_fopen = "wb"; + + if ((filename!=NULL) && (mode_fopen != NULL)) + file = fopen(filename, mode_fopen); + return file; +} + +static voidpf ZCALLBACK fopen64_file_func (voidpf opaque, const void* filename, int mode) +{ + FILE* file = NULL; + const char* mode_fopen = NULL; + if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER)==ZLIB_FILEFUNC_MODE_READ) + mode_fopen = "rb"; + else + if (mode & ZLIB_FILEFUNC_MODE_EXISTING) + mode_fopen = "r+b"; + else + if (mode & ZLIB_FILEFUNC_MODE_CREATE) + mode_fopen = "wb"; + + if ((filename!=NULL) && (mode_fopen != NULL)) + file = FOPEN_FUNC((const char*)filename, mode_fopen); + return file; +} + + +static uLong ZCALLBACK fread_file_func (voidpf opaque, voidpf stream, void* buf, uLong size) +{ + uLong ret; + ret = (uLong)fread(buf, 1, (size_t)size, (FILE *)stream); + return ret; +} + +static uLong ZCALLBACK fwrite_file_func (voidpf opaque, voidpf stream, 
const void* buf, uLong size) +{ + uLong ret; + ret = (uLong)fwrite(buf, 1, (size_t)size, (FILE *)stream); + return ret; +} + +static long ZCALLBACK ftell_file_func (voidpf opaque, voidpf stream) +{ + long ret; + ret = ftell((FILE *)stream); + return ret; +} + + +static ZPOS64_T ZCALLBACK ftell64_file_func (voidpf opaque, voidpf stream) +{ + ZPOS64_T ret; + ret = FTELLO_FUNC((FILE *)stream); + return ret; +} + +static long ZCALLBACK fseek_file_func (voidpf opaque, voidpf stream, uLong offset, int origin) +{ + int fseek_origin=0; + long ret; + switch (origin) + { + case ZLIB_FILEFUNC_SEEK_CUR : + fseek_origin = SEEK_CUR; + break; + case ZLIB_FILEFUNC_SEEK_END : + fseek_origin = SEEK_END; + break; + case ZLIB_FILEFUNC_SEEK_SET : + fseek_origin = SEEK_SET; + break; + default: return -1; + } + ret = 0; + if (fseek((FILE *)stream, offset, fseek_origin) != 0) + ret = -1; + return ret; +} + +static long ZCALLBACK fseek64_file_func (voidpf opaque, voidpf stream, ZPOS64_T offset, int origin) +{ + int fseek_origin=0; + long ret; + switch (origin) + { + case ZLIB_FILEFUNC_SEEK_CUR : + fseek_origin = SEEK_CUR; + break; + case ZLIB_FILEFUNC_SEEK_END : + fseek_origin = SEEK_END; + break; + case ZLIB_FILEFUNC_SEEK_SET : + fseek_origin = SEEK_SET; + break; + default: return -1; + } + ret = 0; + + if(FSEEKO_FUNC((FILE *)stream, offset, fseek_origin) != 0) + ret = -1; + + return ret; +} + + +static int ZCALLBACK fclose_file_func (voidpf opaque, voidpf stream) +{ + int ret; + ret = fclose((FILE *)stream); + return ret; +} + +static int ZCALLBACK ferror_file_func (voidpf opaque, voidpf stream) +{ + int ret; + ret = ferror((FILE *)stream); + return ret; +} + +void fill_fopen_filefunc (pzlib_filefunc_def) + zlib_filefunc_def* pzlib_filefunc_def; +{ + pzlib_filefunc_def->zopen_file = fopen_file_func; + pzlib_filefunc_def->zread_file = fread_file_func; + pzlib_filefunc_def->zwrite_file = fwrite_file_func; + pzlib_filefunc_def->ztell_file = ftell_file_func; + pzlib_filefunc_def->zseek_file = fseek_file_func; + pzlib_filefunc_def->zclose_file = fclose_file_func; + pzlib_filefunc_def->zerror_file = ferror_file_func; + pzlib_filefunc_def->opaque = NULL; +} + +void fill_fopen64_filefunc (zlib_filefunc64_def* pzlib_filefunc_def) +{ + pzlib_filefunc_def->zopen64_file = fopen64_file_func; + pzlib_filefunc_def->zread_file = fread_file_func; + pzlib_filefunc_def->zwrite_file = fwrite_file_func; + pzlib_filefunc_def->ztell64_file = ftell64_file_func; + pzlib_filefunc_def->zseek64_file = fseek64_file_func; + pzlib_filefunc_def->zclose_file = fclose_file_func; + pzlib_filefunc_def->zerror_file = ferror_file_func; + pzlib_filefunc_def->opaque = NULL; +} diff --git a/src/third_party/minizip/ioapi.h b/src/third_party/minizip/ioapi.h new file mode 100644 index 000000000..4cff190ac --- /dev/null +++ b/src/third_party/minizip/ioapi.h @@ -0,0 +1,208 @@ +/* ioapi.h -- IO base function header for compress/uncompress .zip + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) + + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + + Changes + + Oct-2009 - Defined ZPOS64_T to fpos_t on windows and u_int64_t on linux. (might need to find a better why for this) + Oct-2009 - Change to fseeko64, ftello64 and fopen64 so large files would work on linux. 
+ More if/def section may be needed to support other platforms + Oct-2009 - Defined fxxxx64 calls to normal fopen/ftell/fseek so they would compile on windows. + (but you should use iowin32.c for windows instead) + +*/ + +#ifndef _ZLIBIOAPI64_H +#define _ZLIBIOAPI64_H + +#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) + + /* Linux needs this to support file operation on files larger then 4+GB + But might need better if/def to select just the platforms that needs them. */ + + #ifndef __USE_FILE_OFFSET64 + #define __USE_FILE_OFFSET64 + #endif + #ifndef __USE_LARGEFILE64 + #define __USE_LARGEFILE64 + #endif + #ifndef _LARGEFILE64_SOURCE + #define _LARGEFILE64_SOURCE + #endif + #ifndef _FILE_OFFSET_BIT + #define _FILE_OFFSET_BIT 64 + #endif + +#endif + +#include +#include +#include "zlib.h" + +#if defined(USE_FILE32API) +#define fopen64 fopen +#define ftello64 ftell +#define fseeko64 fseek +#else +#ifdef __FreeBSD__ +#define fopen64 fopen +#define ftello64 ftello +#define fseeko64 fseeko +#endif +#ifdef _MSC_VER + #define fopen64 fopen + #if (_MSC_VER >= 1400) && (!(defined(NO_MSCVER_FILE64_FUNC))) + #define ftello64 _ftelli64 + #define fseeko64 _fseeki64 + #else /* old MSC */ + #define ftello64 ftell + #define fseeko64 fseek + #endif +#endif +#endif + +/* +#ifndef ZPOS64_T + #ifdef _WIN32 + #define ZPOS64_T fpos_t + #else + #include + #define ZPOS64_T uint64_t + #endif +#endif +*/ + +#ifdef HAVE_MINIZIP64_CONF_H +#include "mz64conf.h" +#endif + +/* a type choosen by DEFINE */ +#ifdef HAVE_64BIT_INT_CUSTOM +typedef 64BIT_INT_CUSTOM_TYPE ZPOS64_T; +#else +#ifdef HAS_STDINT_H +#include "stdint.h" +typedef uint64_t ZPOS64_T; +#else + +/* Maximum unsigned 32-bit value used as placeholder for zip64 */ +#define MAXU32 0xffffffff + +#if defined(_MSC_VER) || defined(__BORLANDC__) +typedef unsigned __int64 ZPOS64_T; +#else +typedef unsigned long long int ZPOS64_T; +#endif +#endif +#endif + + + +#ifdef __cplusplus +extern "C" { +#endif + + +#define ZLIB_FILEFUNC_SEEK_CUR (1) +#define ZLIB_FILEFUNC_SEEK_END (2) +#define ZLIB_FILEFUNC_SEEK_SET (0) + +#define ZLIB_FILEFUNC_MODE_READ (1) +#define ZLIB_FILEFUNC_MODE_WRITE (2) +#define ZLIB_FILEFUNC_MODE_READWRITEFILTER (3) + +#define ZLIB_FILEFUNC_MODE_EXISTING (4) +#define ZLIB_FILEFUNC_MODE_CREATE (8) + + +#ifndef ZCALLBACK + #if (defined(WIN32) || defined(_WIN32) || defined (WINDOWS) || defined (_WINDOWS)) && defined(CALLBACK) && defined (USEWINDOWS_CALLBACK) + #define ZCALLBACK CALLBACK + #else + #define ZCALLBACK + #endif +#endif + + + + +typedef voidpf (ZCALLBACK *open_file_func) OF((voidpf opaque, const char* filename, int mode)); +typedef uLong (ZCALLBACK *read_file_func) OF((voidpf opaque, voidpf stream, void* buf, uLong size)); +typedef uLong (ZCALLBACK *write_file_func) OF((voidpf opaque, voidpf stream, const void* buf, uLong size)); +typedef int (ZCALLBACK *close_file_func) OF((voidpf opaque, voidpf stream)); +typedef int (ZCALLBACK *testerror_file_func) OF((voidpf opaque, voidpf stream)); + +typedef long (ZCALLBACK *tell_file_func) OF((voidpf opaque, voidpf stream)); +typedef long (ZCALLBACK *seek_file_func) OF((voidpf opaque, voidpf stream, uLong offset, int origin)); + + +/* here is the "old" 32 bits structure structure */ +typedef struct zlib_filefunc_def_s +{ + open_file_func zopen_file; + read_file_func zread_file; + write_file_func zwrite_file; + tell_file_func ztell_file; + seek_file_func zseek_file; + close_file_func zclose_file; + testerror_file_func zerror_file; + voidpf opaque; +} zlib_filefunc_def; + +typedef 
ZPOS64_T (ZCALLBACK *tell64_file_func) OF((voidpf opaque, voidpf stream)); +typedef long (ZCALLBACK *seek64_file_func) OF((voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)); +typedef voidpf (ZCALLBACK *open64_file_func) OF((voidpf opaque, const void* filename, int mode)); + +typedef struct zlib_filefunc64_def_s +{ + open64_file_func zopen64_file; + read_file_func zread_file; + write_file_func zwrite_file; + tell64_file_func ztell64_file; + seek64_file_func zseek64_file; + close_file_func zclose_file; + testerror_file_func zerror_file; + voidpf opaque; +} zlib_filefunc64_def; + +void fill_fopen64_filefunc OF((zlib_filefunc64_def* pzlib_filefunc_def)); +void fill_fopen_filefunc OF((zlib_filefunc_def* pzlib_filefunc_def)); + +/* now internal definition, only for zip.c and unzip.h */ +typedef struct zlib_filefunc64_32_def_s +{ + zlib_filefunc64_def zfile_func64; + open_file_func zopen32_file; + tell_file_func ztell32_file; + seek_file_func zseek32_file; +} zlib_filefunc64_32_def; + + +#define ZREAD64(filefunc,filestream,buf,size) ((*((filefunc).zfile_func64.zread_file)) ((filefunc).zfile_func64.opaque,filestream,buf,size)) +#define ZWRITE64(filefunc,filestream,buf,size) ((*((filefunc).zfile_func64.zwrite_file)) ((filefunc).zfile_func64.opaque,filestream,buf,size)) +/* #define ZTELL64(filefunc,filestream) ((*((filefunc).ztell64_file)) ((filefunc).opaque,filestream)) */ +/* #define ZSEEK64(filefunc,filestream,pos,mode) ((*((filefunc).zseek64_file)) ((filefunc).opaque,filestream,pos,mode)) */ +#define ZCLOSE64(filefunc,filestream) ((*((filefunc).zfile_func64.zclose_file)) ((filefunc).zfile_func64.opaque,filestream)) +#define ZERROR64(filefunc,filestream) ((*((filefunc).zfile_func64.zerror_file)) ((filefunc).zfile_func64.opaque,filestream)) + +voidpf call_zopen64 OF((const zlib_filefunc64_32_def* pfilefunc,const void*filename,int mode)); +long call_zseek64 OF((const zlib_filefunc64_32_def* pfilefunc,voidpf filestream, ZPOS64_T offset, int origin)); +ZPOS64_T call_ztell64 OF((const zlib_filefunc64_32_def* pfilefunc,voidpf filestream)); + +void fill_zlib_filefunc64_32_def_from_filefunc32(zlib_filefunc64_32_def* p_filefunc64_32,const zlib_filefunc_def* p_filefunc32); + +#define ZOPEN64(filefunc,filename,mode) (call_zopen64((&(filefunc)),(filename),(mode))) +#define ZTELL64(filefunc,filestream) (call_ztell64((&(filefunc)),(filestream))) +#define ZSEEK64(filefunc,filestream,pos,mode) (call_zseek64((&(filefunc)),(filestream),(pos),(mode))) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/minizip/iowin32.c b/src/third_party/minizip/iowin32.c new file mode 100644 index 000000000..de2c026c9 --- /dev/null +++ b/src/third_party/minizip/iowin32.c @@ -0,0 +1,462 @@ +/* iowin32.c -- IO base function header for compress/uncompress .zip + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) + + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + +*/ + +#include + +#include "zlib.h" +#include "ioapi.h" +#include "iowin32.h" + +#ifndef INVALID_HANDLE_VALUE +#define INVALID_HANDLE_VALUE (0xFFFFFFFF) +#endif + +#ifndef INVALID_SET_FILE_POINTER +#define INVALID_SET_FILE_POINTER ((DWORD)-1) +#endif + + +/* see Include/shared/winapifamily.h in the Windows Kit */ +#if defined(WINAPI_FAMILY_PARTITION) && (!(defined(IOWIN32_USING_WINRT_API))) 
+#if WINAPI_FAMILY_ONE_PARTITION(WINAPI_FAMILY, WINAPI_PARTITION_APP) +#define IOWIN32_USING_WINRT_API 1 +#endif +#endif + +voidpf ZCALLBACK win32_open_file_func OF((voidpf opaque, const char* filename, int mode)); +uLong ZCALLBACK win32_read_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size)); +uLong ZCALLBACK win32_write_file_func OF((voidpf opaque, voidpf stream, const void* buf, uLong size)); +ZPOS64_T ZCALLBACK win32_tell64_file_func OF((voidpf opaque, voidpf stream)); +long ZCALLBACK win32_seek64_file_func OF((voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)); +int ZCALLBACK win32_close_file_func OF((voidpf opaque, voidpf stream)); +int ZCALLBACK win32_error_file_func OF((voidpf opaque, voidpf stream)); + +typedef struct +{ + HANDLE hf; + int error; +} WIN32FILE_IOWIN; + + +static void win32_translate_open_mode(int mode, + DWORD* lpdwDesiredAccess, + DWORD* lpdwCreationDisposition, + DWORD* lpdwShareMode, + DWORD* lpdwFlagsAndAttributes) +{ + *lpdwDesiredAccess = *lpdwShareMode = *lpdwFlagsAndAttributes = *lpdwCreationDisposition = 0; + + if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER)==ZLIB_FILEFUNC_MODE_READ) + { + *lpdwDesiredAccess = GENERIC_READ; + *lpdwCreationDisposition = OPEN_EXISTING; + *lpdwShareMode = FILE_SHARE_READ; + } + else if (mode & ZLIB_FILEFUNC_MODE_EXISTING) + { + *lpdwDesiredAccess = GENERIC_WRITE | GENERIC_READ; + *lpdwCreationDisposition = OPEN_EXISTING; + } + else if (mode & ZLIB_FILEFUNC_MODE_CREATE) + { + *lpdwDesiredAccess = GENERIC_WRITE | GENERIC_READ; + *lpdwCreationDisposition = CREATE_ALWAYS; + } +} + +static voidpf win32_build_iowin(HANDLE hFile) +{ + voidpf ret=NULL; + + if ((hFile != NULL) && (hFile != INVALID_HANDLE_VALUE)) + { + WIN32FILE_IOWIN w32fiow; + w32fiow.hf = hFile; + w32fiow.error = 0; + ret = malloc(sizeof(WIN32FILE_IOWIN)); + + if (ret==NULL) + CloseHandle(hFile); + else + *((WIN32FILE_IOWIN*)ret) = w32fiow; + } + return ret; +} + +voidpf ZCALLBACK win32_open64_file_func (voidpf opaque,const void* filename,int mode) +{ + const char* mode_fopen = NULL; + DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; + HANDLE hFile = NULL; + + win32_translate_open_mode(mode,&dwDesiredAccess,&dwCreationDisposition,&dwShareMode,&dwFlagsAndAttributes); + +#ifdef IOWIN32_USING_WINRT_API +#ifdef UNICODE + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile2((LPCTSTR)filename, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + { + WCHAR filenameW[FILENAME_MAX + 0x200 + 1]; + MultiByteToWideChar(CP_ACP,0,(const char*)filename,-1,filenameW,FILENAME_MAX + 0x200); + hFile = CreateFile2(filenameW, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); + } +#endif +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile((LPCTSTR)filename, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL); +#endif + + return win32_build_iowin(hFile); +} + + +voidpf ZCALLBACK win32_open64_file_funcA (voidpf opaque,const void* filename,int mode) +{ + const char* mode_fopen = NULL; + DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; + HANDLE hFile = NULL; + + win32_translate_open_mode(mode,&dwDesiredAccess,&dwCreationDisposition,&dwShareMode,&dwFlagsAndAttributes); + +#ifdef IOWIN32_USING_WINRT_API + if ((filename!=NULL) && (dwDesiredAccess != 0)) + { + WCHAR filenameW[FILENAME_MAX + 0x200 + 1]; + MultiByteToWideChar(CP_ACP,0,(const 
char*)filename,-1,filenameW,FILENAME_MAX + 0x200); + hFile = CreateFile2(filenameW, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); + } +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFileA((LPCSTR)filename, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL); +#endif + + return win32_build_iowin(hFile); +} + + +voidpf ZCALLBACK win32_open64_file_funcW (voidpf opaque,const void* filename,int mode) +{ + const char* mode_fopen = NULL; + DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; + HANDLE hFile = NULL; + + win32_translate_open_mode(mode,&dwDesiredAccess,&dwCreationDisposition,&dwShareMode,&dwFlagsAndAttributes); + +#ifdef IOWIN32_USING_WINRT_API + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile2((LPCWSTR)filename, dwDesiredAccess, dwShareMode, dwCreationDisposition,NULL); +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFileW((LPCWSTR)filename, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL); +#endif + + return win32_build_iowin(hFile); +} + + +voidpf ZCALLBACK win32_open_file_func (voidpf opaque,const char* filename,int mode) +{ + const char* mode_fopen = NULL; + DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; + HANDLE hFile = NULL; + + win32_translate_open_mode(mode,&dwDesiredAccess,&dwCreationDisposition,&dwShareMode,&dwFlagsAndAttributes); + +#ifdef IOWIN32_USING_WINRT_API +#ifdef UNICODE + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile2((LPCTSTR)filename, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + { + WCHAR filenameW[FILENAME_MAX + 0x200 + 1]; + MultiByteToWideChar(CP_ACP,0,(const char*)filename,-1,filenameW,FILENAME_MAX + 0x200); + hFile = CreateFile2(filenameW, dwDesiredAccess, dwShareMode, dwCreationDisposition, NULL); + } +#endif +#else + if ((filename!=NULL) && (dwDesiredAccess != 0)) + hFile = CreateFile((LPCTSTR)filename, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL); +#endif + + return win32_build_iowin(hFile); +} + + +uLong ZCALLBACK win32_read_file_func (voidpf opaque, voidpf stream, void* buf,uLong size) +{ + uLong ret=0; + HANDLE hFile = NULL; + if (stream!=NULL) + hFile = ((WIN32FILE_IOWIN*)stream) -> hf; + + if (hFile != NULL) + { + if (!ReadFile(hFile, buf, size, &ret, NULL)) + { + DWORD dwErr = GetLastError(); + if (dwErr == ERROR_HANDLE_EOF) + dwErr = 0; + ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; + } + } + + return ret; +} + + +uLong ZCALLBACK win32_write_file_func (voidpf opaque,voidpf stream,const void* buf,uLong size) +{ + uLong ret=0; + HANDLE hFile = NULL; + if (stream!=NULL) + hFile = ((WIN32FILE_IOWIN*)stream) -> hf; + + if (hFile != NULL) + { + if (!WriteFile(hFile, buf, size, &ret, NULL)) + { + DWORD dwErr = GetLastError(); + if (dwErr == ERROR_HANDLE_EOF) + dwErr = 0; + ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; + } + } + + return ret; +} + +static BOOL MySetFilePointerEx(HANDLE hFile, LARGE_INTEGER pos, LARGE_INTEGER *newPos, DWORD dwMoveMethod) +{ +#ifdef IOWIN32_USING_WINRT_API + return SetFilePointerEx(hFile, pos, newPos, dwMoveMethod); +#else + LONG lHigh = pos.HighPart; + DWORD dwNewPos = SetFilePointer(hFile, pos.LowPart, &lHigh, dwMoveMethod); + BOOL fOk = TRUE; + if (dwNewPos == 0xFFFFFFFF) + if (GetLastError() != NO_ERROR) + fOk = FALSE; + if ((newPos != NULL) && (fOk)) + { + 
newPos->LowPart = dwNewPos; + newPos->HighPart = lHigh; + } + return fOk; +#endif +} + +long ZCALLBACK win32_tell_file_func (voidpf opaque,voidpf stream) +{ + long ret=-1; + HANDLE hFile = NULL; + if (stream!=NULL) + hFile = ((WIN32FILE_IOWIN*)stream) -> hf; + if (hFile != NULL) + { + LARGE_INTEGER pos; + pos.QuadPart = 0; + + if (!MySetFilePointerEx(hFile, pos, &pos, FILE_CURRENT)) + { + DWORD dwErr = GetLastError(); + ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; + ret = -1; + } + else + ret=(long)pos.LowPart; + } + return ret; +} + +ZPOS64_T ZCALLBACK win32_tell64_file_func (voidpf opaque, voidpf stream) +{ + ZPOS64_T ret= (ZPOS64_T)-1; + HANDLE hFile = NULL; + if (stream!=NULL) + hFile = ((WIN32FILE_IOWIN*)stream)->hf; + + if (hFile) + { + LARGE_INTEGER pos; + pos.QuadPart = 0; + + if (!MySetFilePointerEx(hFile, pos, &pos, FILE_CURRENT)) + { + DWORD dwErr = GetLastError(); + ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; + ret = (ZPOS64_T)-1; + } + else + ret=pos.QuadPart; + } + return ret; +} + + +long ZCALLBACK win32_seek_file_func (voidpf opaque,voidpf stream,uLong offset,int origin) +{ + DWORD dwMoveMethod=0xFFFFFFFF; + HANDLE hFile = NULL; + + long ret=-1; + if (stream!=NULL) + hFile = ((WIN32FILE_IOWIN*)stream) -> hf; + switch (origin) + { + case ZLIB_FILEFUNC_SEEK_CUR : + dwMoveMethod = FILE_CURRENT; + break; + case ZLIB_FILEFUNC_SEEK_END : + dwMoveMethod = FILE_END; + break; + case ZLIB_FILEFUNC_SEEK_SET : + dwMoveMethod = FILE_BEGIN; + break; + default: return -1; + } + + if (hFile != NULL) + { + LARGE_INTEGER pos; + pos.QuadPart = offset; + if (!MySetFilePointerEx(hFile, pos, NULL, dwMoveMethod)) + { + DWORD dwErr = GetLastError(); + ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; + ret = -1; + } + else + ret=0; + } + return ret; +} + +long ZCALLBACK win32_seek64_file_func (voidpf opaque, voidpf stream,ZPOS64_T offset,int origin) +{ + DWORD dwMoveMethod=0xFFFFFFFF; + HANDLE hFile = NULL; + long ret=-1; + + if (stream!=NULL) + hFile = ((WIN32FILE_IOWIN*)stream)->hf; + + switch (origin) + { + case ZLIB_FILEFUNC_SEEK_CUR : + dwMoveMethod = FILE_CURRENT; + break; + case ZLIB_FILEFUNC_SEEK_END : + dwMoveMethod = FILE_END; + break; + case ZLIB_FILEFUNC_SEEK_SET : + dwMoveMethod = FILE_BEGIN; + break; + default: return -1; + } + + if (hFile) + { + LARGE_INTEGER pos; + pos.QuadPart = offset; + if (!MySetFilePointerEx(hFile, pos, NULL, dwMoveMethod)) + { + DWORD dwErr = GetLastError(); + ((WIN32FILE_IOWIN*)stream) -> error=(int)dwErr; + ret = -1; + } + else + ret=0; + } + return ret; +} + +int ZCALLBACK win32_close_file_func (voidpf opaque, voidpf stream) +{ + int ret=-1; + + if (stream!=NULL) + { + HANDLE hFile; + hFile = ((WIN32FILE_IOWIN*)stream) -> hf; + if (hFile != NULL) + { + CloseHandle(hFile); + ret=0; + } + free(stream); + } + return ret; +} + +int ZCALLBACK win32_error_file_func (voidpf opaque,voidpf stream) +{ + int ret=-1; + if (stream!=NULL) + { + ret = ((WIN32FILE_IOWIN*)stream) -> error; + } + return ret; +} + +void fill_win32_filefunc (zlib_filefunc_def* pzlib_filefunc_def) +{ + pzlib_filefunc_def->zopen_file = win32_open_file_func; + pzlib_filefunc_def->zread_file = win32_read_file_func; + pzlib_filefunc_def->zwrite_file = win32_write_file_func; + pzlib_filefunc_def->ztell_file = win32_tell_file_func; + pzlib_filefunc_def->zseek_file = win32_seek_file_func; + pzlib_filefunc_def->zclose_file = win32_close_file_func; + pzlib_filefunc_def->zerror_file = win32_error_file_func; + pzlib_filefunc_def->opaque = NULL; +} + +void fill_win32_filefunc64(zlib_filefunc64_def* 
pzlib_filefunc_def) +{ + pzlib_filefunc_def->zopen64_file = win32_open64_file_func; + pzlib_filefunc_def->zread_file = win32_read_file_func; + pzlib_filefunc_def->zwrite_file = win32_write_file_func; + pzlib_filefunc_def->ztell64_file = win32_tell64_file_func; + pzlib_filefunc_def->zseek64_file = win32_seek64_file_func; + pzlib_filefunc_def->zclose_file = win32_close_file_func; + pzlib_filefunc_def->zerror_file = win32_error_file_func; + pzlib_filefunc_def->opaque = NULL; +} + + +void fill_win32_filefunc64A(zlib_filefunc64_def* pzlib_filefunc_def) +{ + pzlib_filefunc_def->zopen64_file = win32_open64_file_funcA; + pzlib_filefunc_def->zread_file = win32_read_file_func; + pzlib_filefunc_def->zwrite_file = win32_write_file_func; + pzlib_filefunc_def->ztell64_file = win32_tell64_file_func; + pzlib_filefunc_def->zseek64_file = win32_seek64_file_func; + pzlib_filefunc_def->zclose_file = win32_close_file_func; + pzlib_filefunc_def->zerror_file = win32_error_file_func; + pzlib_filefunc_def->opaque = NULL; +} + + +void fill_win32_filefunc64W(zlib_filefunc64_def* pzlib_filefunc_def) +{ + pzlib_filefunc_def->zopen64_file = win32_open64_file_funcW; + pzlib_filefunc_def->zread_file = win32_read_file_func; + pzlib_filefunc_def->zwrite_file = win32_write_file_func; + pzlib_filefunc_def->ztell64_file = win32_tell64_file_func; + pzlib_filefunc_def->zseek64_file = win32_seek64_file_func; + pzlib_filefunc_def->zclose_file = win32_close_file_func; + pzlib_filefunc_def->zerror_file = win32_error_file_func; + pzlib_filefunc_def->opaque = NULL; +} diff --git a/src/third_party/minizip/iowin32.h b/src/third_party/minizip/iowin32.h new file mode 100644 index 000000000..0ca0969a7 --- /dev/null +++ b/src/third_party/minizip/iowin32.h @@ -0,0 +1,28 @@ +/* iowin32.h -- IO base function header for compress/uncompress .zip + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) + + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + +*/ + +#include + + +#ifdef __cplusplus +extern "C" { +#endif + +void fill_win32_filefunc OF((zlib_filefunc_def* pzlib_filefunc_def)); +void fill_win32_filefunc64 OF((zlib_filefunc64_def* pzlib_filefunc_def)); +void fill_win32_filefunc64A OF((zlib_filefunc64_def* pzlib_filefunc_def)); +void fill_win32_filefunc64W OF((zlib_filefunc64_def* pzlib_filefunc_def)); + +#ifdef __cplusplus +} +#endif diff --git a/src/third_party/minizip/make_vms.com b/src/third_party/minizip/make_vms.com new file mode 100644 index 000000000..9ac13a98f --- /dev/null +++ b/src/third_party/minizip/make_vms.com @@ -0,0 +1,25 @@ +$ if f$search("ioapi.h_orig") .eqs. 
"" then copy ioapi.h ioapi.h_orig +$ open/write zdef vmsdefs.h +$ copy sys$input: zdef +$ deck +#define unix +#define fill_zlib_filefunc64_32_def_from_filefunc32 fillzffunc64from +#define Write_Zip64EndOfCentralDirectoryLocator Write_Zip64EoDLocator +#define Write_Zip64EndOfCentralDirectoryRecord Write_Zip64EoDRecord +#define Write_EndOfCentralDirectoryRecord Write_EoDRecord +$ eod +$ close zdef +$ copy vmsdefs.h,ioapi.h_orig ioapi.h +$ cc/include=[--]/prefix=all ioapi.c +$ cc/include=[--]/prefix=all miniunz.c +$ cc/include=[--]/prefix=all unzip.c +$ cc/include=[--]/prefix=all minizip.c +$ cc/include=[--]/prefix=all zip.c +$ link miniunz,unzip,ioapi,[--]libz.olb/lib +$ link minizip,zip,ioapi,[--]libz.olb/lib +$ mcr []minizip test minizip_info.txt +$ mcr []miniunz -l test.zip +$ rename minizip_info.txt; minizip_info.txt_old +$ mcr []miniunz test.zip +$ delete test.zip;* +$exit diff --git a/src/third_party/minizip/miniunz.c b/src/third_party/minizip/miniunz.c new file mode 100644 index 000000000..76f8c242a --- /dev/null +++ b/src/third_party/minizip/miniunz.c @@ -0,0 +1,660 @@ +/* + miniunz.c + Version 1.1, February 14h, 2010 + sample part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) + + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications of Unzip for Zip64 + Copyright (C) 2007-2008 Even Rouault + + Modifications for Zip64 support on both zip and unzip + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) +*/ + +#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) + #ifndef __USE_FILE_OFFSET64 + #define __USE_FILE_OFFSET64 + #endif + #ifndef __USE_LARGEFILE64 + #define __USE_LARGEFILE64 + #endif + #ifndef _LARGEFILE64_SOURCE + #define _LARGEFILE64_SOURCE + #endif + #ifndef _FILE_OFFSET_BIT + #define _FILE_OFFSET_BIT 64 + #endif +#endif + +#ifdef __APPLE__ +/* In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions */ +#define FOPEN_FUNC(filename, mode) fopen(filename, mode) +#define FTELLO_FUNC(stream) ftello(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko(stream, offset, origin) +#else +#define FOPEN_FUNC(filename, mode) fopen64(filename, mode) +#define FTELLO_FUNC(stream) ftello64(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko64(stream, offset, origin) +#endif + + +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +# include +# include +#else +# include +# include +#endif + + +#include "unzip.h" + +#define CASESENSITIVITY (0) +#define WRITEBUFFERSIZE (8192) +#define MAXFILENAME (256) + +#ifdef _WIN32 +#define USEWIN32IOAPI +#include "iowin32.h" +#endif +/* + mini unzip, demo of unzip package + + usage : + Usage : miniunz [-exvlo] file.zip [file_to_extract] [-d extractdir] + + list the file in the zipfile, and print the content of FILE_ID.ZIP or README.TXT + if it exists +*/ + + +/* change_file_date : change the date/time of a file + filename : the filename of the file where date/time must be modified + dosdate : the new date at the MSDos format (4 bytes) + tmu_date : the SAME new date at the tm_unz format */ +void change_file_date(filename,dosdate,tmu_date) + const char *filename; + uLong dosdate; + tm_unz tmu_date; +{ +#ifdef _WIN32 + HANDLE hFile; + FILETIME ftm,ftLocal,ftCreate,ftLastAcc,ftLastWrite; + + hFile = CreateFileA(filename,GENERIC_READ | GENERIC_WRITE, + 0,NULL,OPEN_EXISTING,0,NULL); + GetFileTime(hFile,&ftCreate,&ftLastAcc,&ftLastWrite); + 
DosDateTimeToFileTime((WORD)(dosdate>>16),(WORD)dosdate,&ftLocal); + LocalFileTimeToFileTime(&ftLocal,&ftm); + SetFileTime(hFile,&ftm,&ftLastAcc,&ftm); + CloseHandle(hFile); +#else +#ifdef unix || __APPLE__ + struct utimbuf ut; + struct tm newdate; + newdate.tm_sec = tmu_date.tm_sec; + newdate.tm_min=tmu_date.tm_min; + newdate.tm_hour=tmu_date.tm_hour; + newdate.tm_mday=tmu_date.tm_mday; + newdate.tm_mon=tmu_date.tm_mon; + if (tmu_date.tm_year > 1900) + newdate.tm_year=tmu_date.tm_year - 1900; + else + newdate.tm_year=tmu_date.tm_year ; + newdate.tm_isdst=-1; + + ut.actime=ut.modtime=mktime(&newdate); + utime(filename,&ut); +#endif +#endif +} + + +/* mymkdir and change_file_date are not 100 % portable + As I don't know well Unix, I wait feedback for the unix portion */ + +int mymkdir(dirname) + const char* dirname; +{ + int ret=0; +#ifdef _WIN32 + ret = _mkdir(dirname); +#elif unix + ret = mkdir (dirname,0775); +#elif __APPLE__ + ret = mkdir (dirname,0775); +#endif + return ret; +} + +int makedir (newdir) + char *newdir; +{ + char *buffer ; + char *p; + int len = (int)strlen(newdir); + + if (len <= 0) + return 0; + + buffer = (char*)malloc(len+1); + if (buffer==NULL) + { + printf("Error allocating memory\n"); + return UNZ_INTERNALERROR; + } + strcpy(buffer,newdir); + + if (buffer[len-1] == '/') { + buffer[len-1] = '\0'; + } + if (mymkdir(buffer) == 0) + { + free(buffer); + return 1; + } + + p = buffer+1; + while (1) + { + char hold; + + while(*p && *p != '\\' && *p != '/') + p++; + hold = *p; + *p = 0; + if ((mymkdir(buffer) == -1) && (errno == ENOENT)) + { + printf("couldn't create directory %s\n",buffer); + free(buffer); + return 0; + } + if (hold == 0) + break; + *p++ = hold; + } + free(buffer); + return 1; +} + +void do_banner() +{ + printf("MiniUnz 1.01b, demo of zLib + Unz package written by Gilles Vollant\n"); + printf("more info at http://www.winimage.com/zLibDll/unzip.html\n\n"); +} + +void do_help() +{ + printf("Usage : miniunz [-e] [-x] [-v] [-l] [-o] [-p password] file.zip [file_to_extr.] 
[-d extractdir]\n\n" \
+           "  -e  Extract without pathname (junk paths)\n" \
+           "  -x  Extract with pathname\n" \
+           "  -v  list files\n" \
+           "  -l  list files\n" \
+           "  -d  directory to extract into\n" \
+           "  -o  overwrite files without prompting\n" \
+           "  -p  extract crypted file using password\n\n");
+}
+
+void Display64BitsSize(ZPOS64_T n, int size_char)
+{
+  /* to avoid compatibility problem , we do here the conversion */
+  char number[21];
+  int offset=19;
+  int pos_string = 19;
+  number[20]=0;
+  for (;;) {
+      number[offset]=(char)((n%10)+'0');
+      if (number[offset] != '0')
+          pos_string=offset;
+      n/=10;
+      if (offset==0)
+          break;
+      offset--;
+  }
+  {
+      int size_display_string = 19-pos_string;
+      while (size_char > size_display_string)
+      {
+          size_char--;
+          printf(" ");
+      }
+  }
+
+  printf("%s",&number[pos_string]);
+}
+
+int do_list(uf)
+    unzFile uf;
+{
+    uLong i;
+    unz_global_info64 gi;
+    int err;
+
+    err = unzGetGlobalInfo64(uf,&gi);
+    if (err!=UNZ_OK)
+        printf("error %d with zipfile in unzGetGlobalInfo \n",err);
+    printf("  Length  Method     Size Ratio   Date    Time   CRC-32     Name\n");
+    printf("  ------  ------     ---- -----   ----    ----   ------     ----\n");
+    for (i=0;i<gi.number_entry;i++)
+    {
+        char filename_inzip[256];
+        unz_file_info64 file_info;
+        uLong ratio=0;
+        const char *string_method;
+        char charCrypt=' ';
+        err = unzGetCurrentFileInfo64(uf,&file_info,filename_inzip,sizeof(filename_inzip),NULL,0,NULL,0);
+        if (err!=UNZ_OK)
+        {
+            printf("error %d with zipfile in unzGetCurrentFileInfo\n",err);
+            break;
+        }
+        if (file_info.uncompressed_size>0)
+            ratio = (uLong)((file_info.compressed_size*100)/file_info.uncompressed_size);
+
+        /* display a '*' if the file is crypted */
+        if ((file_info.flag & 1) != 0)
+            charCrypt='*';
+
+        if (file_info.compression_method==0)
+            string_method="Stored";
+        else
+        if (file_info.compression_method==Z_DEFLATED)
+        {
+            uInt iLevel=(uInt)((file_info.flag & 0x6)/2);
+            if (iLevel==0)
+              string_method="Defl:N";
+            else if (iLevel==1)
+              string_method="Defl:X";
+            else if ((iLevel==2) || (iLevel==3))
+              string_method="Defl:F"; /* 2:fast , 3 : extra fast*/
+        }
+        else
+        if (file_info.compression_method==Z_BZIP2ED)
+        {
+              string_method="BZip2 ";
+        }
+        else
+            string_method="Unkn. 
"; + + Display64BitsSize(file_info.uncompressed_size,7); + printf(" %6s%c",string_method,charCrypt); + Display64BitsSize(file_info.compressed_size,7); + printf(" %3lu%% %2.2lu-%2.2lu-%2.2lu %2.2lu:%2.2lu %8.8lx %s\n", + ratio, + (uLong)file_info.tmu_date.tm_mon + 1, + (uLong)file_info.tmu_date.tm_mday, + (uLong)file_info.tmu_date.tm_year % 100, + (uLong)file_info.tmu_date.tm_hour,(uLong)file_info.tmu_date.tm_min, + (uLong)file_info.crc,filename_inzip); + if ((i+1)='a') && (rep<='z')) + rep -= 0x20; + } + while ((rep!='Y') && (rep!='N') && (rep!='A')); + } + + if (rep == 'N') + skip = 1; + + if (rep == 'A') + *popt_overwrite=1; + } + + if ((skip==0) && (err==UNZ_OK)) + { + fout=FOPEN_FUNC(write_filename,"wb"); + /* some zipfile don't contain directory alone before file */ + if ((fout==NULL) && ((*popt_extract_without_path)==0) && + (filename_withoutpath!=(char*)filename_inzip)) + { + char c=*(filename_withoutpath-1); + *(filename_withoutpath-1)='\0'; + makedir(write_filename); + *(filename_withoutpath-1)=c; + fout=FOPEN_FUNC(write_filename,"wb"); + } + + if (fout==NULL) + { + printf("error opening %s\n",write_filename); + } + } + + if (fout!=NULL) + { + printf(" extracting: %s\n",write_filename); + + do + { + err = unzReadCurrentFile(uf,buf,size_buf); + if (err<0) + { + printf("error %d with zipfile in unzReadCurrentFile\n",err); + break; + } + if (err>0) + if (fwrite(buf,err,1,fout)!=1) + { + printf("error in writing extracted file\n"); + err=UNZ_ERRNO; + break; + } + } + while (err>0); + if (fout) + fclose(fout); + + if (err==0) + change_file_date(write_filename,file_info.dosDate, + file_info.tmu_date); + } + + if (err==UNZ_OK) + { + err = unzCloseCurrentFile (uf); + if (err!=UNZ_OK) + { + printf("error %d with zipfile in unzCloseCurrentFile\n",err); + } + } + else + unzCloseCurrentFile(uf); /* don't lose the error */ + } + + free(buf); + return err; +} + + +int do_extract(uf,opt_extract_without_path,opt_overwrite,password) + unzFile uf; + int opt_extract_without_path; + int opt_overwrite; + const char* password; +{ + uLong i; + unz_global_info64 gi; + int err; + FILE* fout=NULL; + + err = unzGetGlobalInfo64(uf,&gi); + if (err!=UNZ_OK) + printf("error %d with zipfile in unzGetGlobalInfo \n",err); + + for (i=0;i insert n+1 empty lines +.\" for manpage-specific macros, see man(7) +.SH NAME +miniunzip - uncompress and examine ZIP archives +.SH SYNOPSIS +.B miniunzip +.RI [ -exvlo ] +zipfile [ files_to_extract ] [-d tempdir] +.SH DESCRIPTION +.B minizip +is a simple tool which allows the extraction of compressed file +archives in the ZIP format used by the MS-DOS utility PKZIP. It was +written as a demonstration of the +.IR zlib (3) +library and therefore lack many of the features of the +.IR unzip (1) +program. +.SH OPTIONS +A number of options are supported. With the exception of +.BI \-d\ tempdir +these must be supplied before any +other arguments and are: +.TP +.BI \-l\ ,\ \-\-v +List the files in the archive without extracting them. +.TP +.B \-o +Overwrite files without prompting for confirmation. +.TP +.B \-x +Extract files (default). +.PP +The +.I zipfile +argument is the name of the archive to process. The next argument can be used +to specify a single file to extract from the archive. + +Lastly, the following option can be specified at the end of the command-line: +.TP +.BI \-d\ tempdir +Extract the archive in the directory +.I tempdir +rather than the current directory. +.SH SEE ALSO +.BR minizip (1), +.BR zlib (3), +.BR unzip (1). 
+.SH AUTHOR +This program was written by Gilles Vollant. This manual page was +written by Mark Brown . The -d tempdir option +was added by Dirk Eddelbuettel . diff --git a/src/third_party/minizip/minizip.1 b/src/third_party/minizip/minizip.1 new file mode 100644 index 000000000..1154484c1 --- /dev/null +++ b/src/third_party/minizip/minizip.1 @@ -0,0 +1,46 @@ +.\" Hey, EMACS: -*- nroff -*- +.TH minizip 1 "May 2, 2001" +.\" Please adjust this date whenever revising the manpage. +.\" +.\" Some roff macros, for reference: +.\" .nh disable hyphenation +.\" .hy enable hyphenation +.\" .ad l left justify +.\" .ad b justify to both left and right margins +.\" .nf disable filling +.\" .fi enable filling +.\" .br insert line break +.\" .sp insert n+1 empty lines +.\" for manpage-specific macros, see man(7) +.SH NAME +minizip - create ZIP archives +.SH SYNOPSIS +.B minizip +.RI [ -o ] +zipfile [ " files" ... ] +.SH DESCRIPTION +.B minizip +is a simple tool which allows the creation of compressed file archives +in the ZIP format used by the MS-DOS utility PKZIP. It was written as +a demonstration of the +.IR zlib (3) +library and therefore lack many of the features of the +.IR zip (1) +program. +.SH OPTIONS +The first argument supplied is the name of the ZIP archive to create or +.RI -o +in which case it is ignored and the second argument treated as the +name of the ZIP file. If the ZIP file already exists it will be +overwritten. +.PP +Subsequent arguments specify a list of files to place in the ZIP +archive. If none are specified then an empty archive will be created. +.SH SEE ALSO +.BR miniunzip (1), +.BR zlib (3), +.BR zip (1). +.SH AUTHOR +This program was written by Gilles Vollant. This manual page was +written by Mark Brown . + diff --git a/src/third_party/minizip/minizip.c b/src/third_party/minizip/minizip.c new file mode 100644 index 000000000..c463c8315 --- /dev/null +++ b/src/third_party/minizip/minizip.c @@ -0,0 +1,520 @@ +/* + minizip.c + Version 1.1, February 14h, 2010 + sample part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) + + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications of Unzip for Zip64 + Copyright (C) 2007-2008 Even Rouault + + Modifications for Zip64 support on both zip and unzip + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) +*/ + + +#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) + #ifndef __USE_FILE_OFFSET64 + #define __USE_FILE_OFFSET64 + #endif + #ifndef __USE_LARGEFILE64 + #define __USE_LARGEFILE64 + #endif + #ifndef _LARGEFILE64_SOURCE + #define _LARGEFILE64_SOURCE + #endif + #ifndef _FILE_OFFSET_BIT + #define _FILE_OFFSET_BIT 64 + #endif +#endif + +#ifdef __APPLE__ +/* In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions */ +#define FOPEN_FUNC(filename, mode) fopen(filename, mode) +#define FTELLO_FUNC(stream) ftello(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko(stream, offset, origin) +#else +#define FOPEN_FUNC(filename, mode) fopen64(filename, mode) +#define FTELLO_FUNC(stream) ftello64(stream) +#define FSEEKO_FUNC(stream, offset, origin) fseeko64(stream, offset, origin) +#endif + + + +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +# include +# include +#else +# include +# include +# include +# include +#endif + +#include "zip.h" + +#ifdef _WIN32 + #define USEWIN32IOAPI + #include "iowin32.h" +#endif + + + +#define WRITEBUFFERSIZE 
(16384) +#define MAXFILENAME (256) + +#ifdef _WIN32 +uLong filetime(f, tmzip, dt) + char *f; /* name of file to get info on */ + tm_zip *tmzip; /* return value: access, modific. and creation times */ + uLong *dt; /* dostime */ +{ + int ret = 0; + { + FILETIME ftLocal; + HANDLE hFind; + WIN32_FIND_DATAA ff32; + + hFind = FindFirstFileA(f,&ff32); + if (hFind != INVALID_HANDLE_VALUE) + { + FileTimeToLocalFileTime(&(ff32.ftLastWriteTime),&ftLocal); + FileTimeToDosDateTime(&ftLocal,((LPWORD)dt)+1,((LPWORD)dt)+0); + FindClose(hFind); + ret = 1; + } + } + return ret; +} +#else +#ifdef unix || __APPLE__ +uLong filetime(f, tmzip, dt) + char *f; /* name of file to get info on */ + tm_zip *tmzip; /* return value: access, modific. and creation times */ + uLong *dt; /* dostime */ +{ + int ret=0; + struct stat s; /* results of stat() */ + struct tm* filedate; + time_t tm_t=0; + + if (strcmp(f,"-")!=0) + { + char name[MAXFILENAME+1]; + int len = strlen(f); + if (len > MAXFILENAME) + len = MAXFILENAME; + + strncpy(name, f,MAXFILENAME-1); + /* strncpy doesnt append the trailing NULL, of the string is too long. */ + name[ MAXFILENAME ] = '\0'; + + if (name[len - 1] == '/') + name[len - 1] = '\0'; + /* not all systems allow stat'ing a file with / appended */ + if (stat(name,&s)==0) + { + tm_t = s.st_mtime; + ret = 1; + } + } + filedate = localtime(&tm_t); + + tmzip->tm_sec = filedate->tm_sec; + tmzip->tm_min = filedate->tm_min; + tmzip->tm_hour = filedate->tm_hour; + tmzip->tm_mday = filedate->tm_mday; + tmzip->tm_mon = filedate->tm_mon ; + tmzip->tm_year = filedate->tm_year; + + return ret; +} +#else +uLong filetime(f, tmzip, dt) + char *f; /* name of file to get info on */ + tm_zip *tmzip; /* return value: access, modific. and creation times */ + uLong *dt; /* dostime */ +{ + return 0; +} +#endif +#endif + + + + +int check_exist_file(filename) + const char* filename; +{ + FILE* ftestexist; + int ret = 1; + ftestexist = FOPEN_FUNC(filename,"rb"); + if (ftestexist==NULL) + ret = 0; + else + fclose(ftestexist); + return ret; +} + +void do_banner() +{ + printf("MiniZip 1.1, demo of zLib + MiniZip64 package, written by Gilles Vollant\n"); + printf("more info on MiniZip at http://www.winimage.com/zLibDll/minizip.html\n\n"); +} + +void do_help() +{ + printf("Usage : minizip [-o] [-a] [-0 to -9] [-p password] [-j] file.zip [files_to_add]\n\n" \ + " -o Overwrite existing file.zip\n" \ + " -a Append to existing file.zip\n" \ + " -0 Store only\n" \ + " -1 Compress faster\n" \ + " -9 Compress better\n\n" \ + " -j exclude path. 
store only the file name.\n\n"); +} + +/* calculate the CRC32 of a file, + because to encrypt a file, we need known the CRC32 of the file before */ +int getFileCrc(const char* filenameinzip,void*buf,unsigned long size_buf,unsigned long* result_crc) +{ + unsigned long calculate_crc=0; + int err=ZIP_OK; + FILE * fin = FOPEN_FUNC(filenameinzip,"rb"); + + unsigned long size_read = 0; + unsigned long total_read = 0; + if (fin==NULL) + { + err = ZIP_ERRNO; + } + + if (err == ZIP_OK) + do + { + err = ZIP_OK; + size_read = (int)fread(buf,1,size_buf,fin); + if (size_read < size_buf) + if (feof(fin)==0) + { + printf("error in reading %s\n",filenameinzip); + err = ZIP_ERRNO; + } + + if (size_read>0) + calculate_crc = crc32(calculate_crc,buf,size_read); + total_read += size_read; + + } while ((err == ZIP_OK) && (size_read>0)); + + if (fin) + fclose(fin); + + *result_crc=calculate_crc; + printf("file %s crc %lx\n", filenameinzip, calculate_crc); + return err; +} + +int isLargeFile(const char* filename) +{ + int largeFile = 0; + ZPOS64_T pos = 0; + FILE* pFile = FOPEN_FUNC(filename, "rb"); + + if(pFile != NULL) + { + int n = FSEEKO_FUNC(pFile, 0, SEEK_END); + pos = FTELLO_FUNC(pFile); + + printf("File : %s is %lld bytes\n", filename, pos); + + if(pos >= 0xffffffff) + largeFile = 1; + + fclose(pFile); + } + + return largeFile; +} + +int main(argc,argv) + int argc; + char *argv[]; +{ + int i; + int opt_overwrite=0; + int opt_compress_level=Z_DEFAULT_COMPRESSION; + int opt_exclude_path=0; + int zipfilenamearg = 0; + char filename_try[MAXFILENAME+16]; + int zipok; + int err=0; + int size_buf=0; + void* buf=NULL; + const char* password=NULL; + + + do_banner(); + if (argc==1) + { + do_help(); + return 0; + } + else + { + for (i=1;i='0') && (c<='9')) + opt_compress_level = c-'0'; + if ((c=='j') || (c=='J')) + opt_exclude_path = 1; + + if (((c=='p') || (c=='P')) && (i+1='a') && (rep<='z')) + rep -= 0x20; + } + while ((rep!='Y') && (rep!='N') && (rep!='A')); + if (rep=='N') + zipok = 0; + if (rep=='A') + opt_overwrite = 2; + } + } + + if (zipok==1) + { + zipFile zf; + int errclose; +# ifdef USEWIN32IOAPI + zlib_filefunc64_def ffunc; + fill_win32_filefunc64A(&ffunc); + zf = zipOpen2_64(filename_try,(opt_overwrite==2) ? 2 : 0,NULL,&ffunc); +# else + zf = zipOpen64(filename_try,(opt_overwrite==2) ? 2 : 0); +# endif + + if (zf == NULL) + { + printf("error opening %s\n",filename_try); + err= ZIP_ERRNO; + } + else + printf("creating %s\n",filename_try); + + for (i=zipfilenamearg+1;(i='0') || (argv[i][1]<='9'))) && + (strlen(argv[i]) == 2))) + { + FILE * fin; + int size_read; + const char* filenameinzip = argv[i]; + const char *savefilenameinzip; + zip_fileinfo zi; + unsigned long crcFile=0; + int zip64 = 0; + + zi.tmz_date.tm_sec = zi.tmz_date.tm_min = zi.tmz_date.tm_hour = + zi.tmz_date.tm_mday = zi.tmz_date.tm_mon = zi.tmz_date.tm_year = 0; + zi.dosDate = 0; + zi.internal_fa = 0; + zi.external_fa = 0; + filetime(filenameinzip,&zi.tmz_date,&zi.dosDate); + +/* + err = zipOpenNewFileInZip(zf,filenameinzip,&zi, + NULL,0,NULL,0,NULL / * comment * /, + (opt_compress_level != 0) ? Z_DEFLATED : 0, + opt_compress_level); +*/ + if ((password != NULL) && (err==ZIP_OK)) + err = getFileCrc(filenameinzip,buf,size_buf,&crcFile); + + zip64 = isLargeFile(filenameinzip); + + /* The path name saved, should not include a leading slash. */ + /*if it did, windows/xp and dynazip couldn't read the zip file. 
*/ + savefilenameinzip = filenameinzip; + while( savefilenameinzip[0] == '\\' || savefilenameinzip[0] == '/' ) + { + savefilenameinzip++; + } + + /*should the zip file contain any path at all?*/ + if( opt_exclude_path ) + { + const char *tmpptr; + const char *lastslash = 0; + for( tmpptr = savefilenameinzip; *tmpptr; tmpptr++) + { + if( *tmpptr == '\\' || *tmpptr == '/') + { + lastslash = tmpptr; + } + } + if( lastslash != NULL ) + { + savefilenameinzip = lastslash+1; /* base filename follows last slash. */ + } + } + + /**/ + err = zipOpenNewFileInZip3_64(zf,savefilenameinzip,&zi, + NULL,0,NULL,0,NULL /* comment*/, + (opt_compress_level != 0) ? Z_DEFLATED : 0, + opt_compress_level,0, + /* -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, */ + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + password,crcFile, zip64); + + if (err != ZIP_OK) + printf("error in opening %s in zipfile\n",filenameinzip); + else + { + fin = FOPEN_FUNC(filenameinzip,"rb"); + if (fin==NULL) + { + err=ZIP_ERRNO; + printf("error in opening %s for reading\n",filenameinzip); + } + } + + if (err == ZIP_OK) + do + { + err = ZIP_OK; + size_read = (int)fread(buf,1,size_buf,fin); + if (size_read < size_buf) + if (feof(fin)==0) + { + printf("error in reading %s\n",filenameinzip); + err = ZIP_ERRNO; + } + + if (size_read>0) + { + err = zipWriteInFileInZip (zf,buf,size_read); + if (err<0) + { + printf("error in writing %s in the zipfile\n", + filenameinzip); + } + + } + } while ((err == ZIP_OK) && (size_read>0)); + + if (fin) + fclose(fin); + + if (err<0) + err=ZIP_ERRNO; + else + { + err = zipCloseFileInZip(zf); + if (err!=ZIP_OK) + printf("error in closing %s in the zipfile\n", + filenameinzip); + } + } + } + errclose = zipClose(zf,NULL); + if (errclose != ZIP_OK) + printf("error in closing %s\n",filename_try); + } + else + { + do_help(); + } + + free(buf); + return 0; +} diff --git a/src/third_party/minizip/minizip.pc.in b/src/third_party/minizip/minizip.pc.in new file mode 100644 index 000000000..69b5b7fdc --- /dev/null +++ b/src/third_party/minizip/minizip.pc.in @@ -0,0 +1,12 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@/minizip + +Name: minizip +Description: Minizip zip file manipulation library +Requires: +Version: @PACKAGE_VERSION@ +Libs: -L${libdir} -lminizip +Libs.private: -lz +Cflags: -I${includedir} diff --git a/src/third_party/minizip/mztools.c b/src/third_party/minizip/mztools.c new file mode 100644 index 000000000..ada9fa885 --- /dev/null +++ b/src/third_party/minizip/mztools.c @@ -0,0 +1,291 @@ +/* + Additional tools for Minizip + Code: Xavier Roche '2004 + License: Same as ZLIB (www.gzip.org) +*/ + +/* Code */ +#include +#include +#include +#include "zlib.h" +#include "unzip.h" + +#define READ_8(adr) ((unsigned char)*(adr)) +#define READ_16(adr) ( READ_8(adr) | (READ_8(adr+1) << 8) ) +#define READ_32(adr) ( READ_16(adr) | (READ_16((adr)+2) << 16) ) + +#define WRITE_8(buff, n) do { \ + *((unsigned char*)(buff)) = (unsigned char) ((n) & 0xff); \ +} while(0) +#define WRITE_16(buff, n) do { \ + WRITE_8((unsigned char*)(buff), n); \ + WRITE_8(((unsigned char*)(buff)) + 1, (n) >> 8); \ +} while(0) +#define WRITE_32(buff, n) do { \ + WRITE_16((unsigned char*)(buff), (n) & 0xffff); \ + WRITE_16((unsigned char*)(buff) + 2, (n) >> 16); \ +} while(0) + +extern int ZEXPORT unzRepair(file, fileOut, fileOutTmp, nRecovered, bytesRecovered) +const char* file; +const char* fileOut; +const char* fileOutTmp; +uLong* nRecovered; +uLong* bytesRecovered; +{ + int err = Z_OK; + FILE* fpZip = 
fopen(file, "rb"); + FILE* fpOut = fopen(fileOut, "wb"); + FILE* fpOutCD = fopen(fileOutTmp, "wb"); + if (fpZip != NULL && fpOut != NULL) { + int entries = 0; + uLong totalBytes = 0; + char header[30]; + char filename[1024]; + char extra[1024]; + int offset = 0; + int offsetCD = 0; + while ( fread(header, 1, 30, fpZip) == 30 ) { + int currentOffset = offset; + + /* File entry */ + if (READ_32(header) == 0x04034b50) { + unsigned int version = READ_16(header + 4); + unsigned int gpflag = READ_16(header + 6); + unsigned int method = READ_16(header + 8); + unsigned int filetime = READ_16(header + 10); + unsigned int filedate = READ_16(header + 12); + unsigned int crc = READ_32(header + 14); /* crc */ + unsigned int cpsize = READ_32(header + 18); /* compressed size */ + unsigned int uncpsize = READ_32(header + 22); /* uncompressed sz */ + unsigned int fnsize = READ_16(header + 26); /* file name length */ + unsigned int extsize = READ_16(header + 28); /* extra field length */ + filename[0] = extra[0] = '\0'; + + /* Header */ + if (fwrite(header, 1, 30, fpOut) == 30) { + offset += 30; + } else { + err = Z_ERRNO; + break; + } + + /* Filename */ + if (fnsize > 0) { + if (fnsize < sizeof(filename)) { + if (fread(filename, 1, fnsize, fpZip) == fnsize) { + if (fwrite(filename, 1, fnsize, fpOut) == fnsize) { + offset += fnsize; + } else { + err = Z_ERRNO; + break; + } + } else { + err = Z_ERRNO; + break; + } + } else { + err = Z_ERRNO; + break; + } + } else { + err = Z_STREAM_ERROR; + break; + } + + /* Extra field */ + if (extsize > 0) { + if (extsize < sizeof(extra)) { + if (fread(extra, 1, extsize, fpZip) == extsize) { + if (fwrite(extra, 1, extsize, fpOut) == extsize) { + offset += extsize; + } else { + err = Z_ERRNO; + break; + } + } else { + err = Z_ERRNO; + break; + } + } else { + err = Z_ERRNO; + break; + } + } + + /* Data */ + { + int dataSize = cpsize; + if (dataSize == 0) { + dataSize = uncpsize; + } + if (dataSize > 0) { + char* data = malloc(dataSize); + if (data != NULL) { + if ((int)fread(data, 1, dataSize, fpZip) == dataSize) { + if ((int)fwrite(data, 1, dataSize, fpOut) == dataSize) { + offset += dataSize; + totalBytes += dataSize; + } else { + err = Z_ERRNO; + } + } else { + err = Z_ERRNO; + } + free(data); + if (err != Z_OK) { + break; + } + } else { + err = Z_MEM_ERROR; + break; + } + } + } + + /* Central directory entry */ + { + char header[46]; + char* comment = ""; + int comsize = (int) strlen(comment); + WRITE_32(header, 0x02014b50); + WRITE_16(header + 4, version); + WRITE_16(header + 6, version); + WRITE_16(header + 8, gpflag); + WRITE_16(header + 10, method); + WRITE_16(header + 12, filetime); + WRITE_16(header + 14, filedate); + WRITE_32(header + 16, crc); + WRITE_32(header + 20, cpsize); + WRITE_32(header + 24, uncpsize); + WRITE_16(header + 28, fnsize); + WRITE_16(header + 30, extsize); + WRITE_16(header + 32, comsize); + WRITE_16(header + 34, 0); /* disk # */ + WRITE_16(header + 36, 0); /* int attrb */ + WRITE_32(header + 38, 0); /* ext attrb */ + WRITE_32(header + 42, currentOffset); + /* Header */ + if (fwrite(header, 1, 46, fpOutCD) == 46) { + offsetCD += 46; + + /* Filename */ + if (fnsize > 0) { + if (fwrite(filename, 1, fnsize, fpOutCD) == fnsize) { + offsetCD += fnsize; + } else { + err = Z_ERRNO; + break; + } + } else { + err = Z_STREAM_ERROR; + break; + } + + /* Extra field */ + if (extsize > 0) { + if (fwrite(extra, 1, extsize, fpOutCD) == extsize) { + offsetCD += extsize; + } else { + err = Z_ERRNO; + break; + } + } + + /* Comment field */ + if (comsize > 0) { + 
if ((int)fwrite(comment, 1, comsize, fpOutCD) == comsize) { + offsetCD += comsize; + } else { + err = Z_ERRNO; + break; + } + } + + + } else { + err = Z_ERRNO; + break; + } + } + + /* Success */ + entries++; + + } else { + break; + } + } + + /* Final central directory */ + { + int entriesZip = entries; + char header[22]; + char* comment = ""; /* "ZIP File recovered by zlib/minizip/mztools"; */ + int comsize = (int) strlen(comment); + if (entriesZip > 0xffff) { + entriesZip = 0xffff; + } + WRITE_32(header, 0x06054b50); + WRITE_16(header + 4, 0); /* disk # */ + WRITE_16(header + 6, 0); /* disk # */ + WRITE_16(header + 8, entriesZip); /* hack */ + WRITE_16(header + 10, entriesZip); /* hack */ + WRITE_32(header + 12, offsetCD); /* size of CD */ + WRITE_32(header + 16, offset); /* offset to CD */ + WRITE_16(header + 20, comsize); /* comment */ + + /* Header */ + if (fwrite(header, 1, 22, fpOutCD) == 22) { + + /* Comment field */ + if (comsize > 0) { + if ((int)fwrite(comment, 1, comsize, fpOutCD) != comsize) { + err = Z_ERRNO; + } + } + + } else { + err = Z_ERRNO; + } + } + + /* Final merge (file + central directory) */ + fclose(fpOutCD); + if (err == Z_OK) { + fpOutCD = fopen(fileOutTmp, "rb"); + if (fpOutCD != NULL) { + int nRead; + char buffer[8192]; + while ( (nRead = (int)fread(buffer, 1, sizeof(buffer), fpOutCD)) > 0) { + if ((int)fwrite(buffer, 1, nRead, fpOut) != nRead) { + err = Z_ERRNO; + break; + } + } + fclose(fpOutCD); + } + } + + /* Close */ + fclose(fpZip); + fclose(fpOut); + + /* Wipe temporary file */ + (void)remove(fileOutTmp); + + /* Number of recovered entries */ + if (err == Z_OK) { + if (nRecovered != NULL) { + *nRecovered = entries; + } + if (bytesRecovered != NULL) { + *bytesRecovered = totalBytes; + } + } + } else { + err = Z_STREAM_ERROR; + } + return err; +} diff --git a/src/third_party/minizip/mztools.h b/src/third_party/minizip/mztools.h new file mode 100644 index 000000000..a49a426ec --- /dev/null +++ b/src/third_party/minizip/mztools.h @@ -0,0 +1,37 @@ +/* + Additional tools for Minizip + Code: Xavier Roche '2004 + License: Same as ZLIB (www.gzip.org) +*/ + +#ifndef _zip_tools_H +#define _zip_tools_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _ZLIB_H +#include "zlib.h" +#endif + +#include "unzip.h" + +/* Repair a ZIP file (missing central directory) + file: file to recover + fileOut: output file after recovery + fileOutTmp: temporary file name used for recovery +*/ +extern int ZEXPORT unzRepair(const char* file, + const char* fileOut, + const char* fileOutTmp, + uLong* nRecovered, + uLong* bytesRecovered); + + +#ifdef __cplusplus +} +#endif + + +#endif diff --git a/src/third_party/minizip/unzip.c b/src/third_party/minizip/unzip.c new file mode 100644 index 000000000..e449b6a5b --- /dev/null +++ b/src/third_party/minizip/unzip.c @@ -0,0 +1,2125 @@ +/* unzip.c -- IO for uncompress .zip files using zlib + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) + + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications of Unzip for Zip64 + Copyright (C) 2007-2008 Even Rouault + + Modifications for Zip64 support on both zip and unzip + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + + + ------------------------------------------------------------------------------------ + Decryption code comes from crypt.c by Info-ZIP but has been greatly reduced in terms of + compatibility with 
older software. The following is from the original crypt.c. + Code woven in by Terry Thorsen 1/2003. + + Copyright (c) 1990-2000 Info-ZIP. All rights reserved. + + See the accompanying file LICENSE, version 2000-Apr-09 or later + (the contents of which are also included in zip.h) for terms of use. + If, for some reason, all these files are missing, the Info-ZIP license + also may be found at: ftp://ftp.info-zip.org/pub/infozip/license.html + + crypt.c (full version) by Info-ZIP. Last revised: [see crypt.h] + + The encryption/decryption parts of this source code (as opposed to the + non-echoing password parts) were originally written in Europe. The + whole source package can be freely distributed, including from the USA. + (Prior to January 2000, re-export from the US was a violation of US law.) + + This encryption code is a direct transcription of the algorithm from + Roger Schlafly, described by Phil Katz in the file appnote.txt. This + file (appnote.txt) is distributed with the PKZIP program (even in the + version without encryption capabilities). + + ------------------------------------------------------------------------------------ + + Changes in unzip.c + + 2007-2008 - Even Rouault - Addition of cpl_unzGetCurrentFileZStreamPos + 2007-2008 - Even Rouault - Decoration of symbol names unz* -> cpl_unz* + 2007-2008 - Even Rouault - Remove old C style function prototypes + 2007-2008 - Even Rouault - Add unzip support for ZIP64 + + Copyright (C) 2007-2008 Even Rouault + + + Oct-2009 - Mathias Svensson - Removed cpl_* from symbol names (Even Rouault added them but since this is now moved to a new project (minizip64) I renamed them again). + Oct-2009 - Mathias Svensson - Fixed problem if uncompressed size was > 4G and compressed size was <4G + should only read the compressed/uncompressed size from the Zip64 format if + the size from normal header was 0xFFFFFFFF + Oct-2009 - Mathias Svensson - Applied some bug fixes from paches recived from Gilles Vollant + Oct-2009 - Mathias Svensson - Applied support to unzip files with compression mathod BZIP2 (bzip2 lib is required) + Patch created by Daniel Borca + + Jan-2010 - back to unzip and minizip 1.0 name scheme, with compatibility layer + + Copyright (C) 1998 - 2010 Gilles Vollant, Even Rouault, Mathias Svensson + +*/ + + +#include +#include +#include + +#ifndef NOUNCRYPT + #define NOUNCRYPT +#endif + +#include "zlib.h" +#include "unzip.h" + +#ifdef STDC +# include +# include +# include +#endif +#ifdef NO_ERRNO_H + extern int errno; +#else +# include +#endif + + +#ifndef local +# define local static +#endif +/* compile with -Dlocal if your debugger can't find static symbols */ + + +#ifndef CASESENSITIVITYDEFAULT_NO +# if !defined(unix) && !defined(CASESENSITIVITYDEFAULT_YES) +# define CASESENSITIVITYDEFAULT_NO +# endif +#endif + + +#ifndef UNZ_BUFSIZE +#define UNZ_BUFSIZE (16384) +#endif + +#ifndef UNZ_MAXFILENAMEINZIP +#define UNZ_MAXFILENAMEINZIP (256) +#endif + +#ifndef ALLOC +# define ALLOC(size) (malloc(size)) +#endif +#ifndef TRYFREE +# define TRYFREE(p) {if (p) free(p);} +#endif + +#define SIZECENTRALDIRITEM (0x2e) +#define SIZEZIPLOCALHEADER (0x1e) + + +const char unz_copyright[] = + " unzip 1.01 Copyright 1998-2004 Gilles Vollant - http://www.winimage.com/zLibDll"; + +/* unz_file_info_interntal contain internal info about a file in zipfile*/ +typedef struct unz_file_info64_internal_s +{ + ZPOS64_T offset_curfile;/* relative offset of local header 8 bytes */ +} unz_file_info64_internal; + + +/* file_in_zip_read_info_s contain internal 
information about a file in zipfile, + when reading and decompress it */ +typedef struct +{ + char *read_buffer; /* internal buffer for compressed data */ + z_stream stream; /* zLib stream structure for inflate */ + +#ifdef HAVE_BZIP2 + bz_stream bstream; /* bzLib stream structure for bziped */ +#endif + + ZPOS64_T pos_in_zipfile; /* position in byte on the zipfile, for fseek*/ + uLong stream_initialised; /* flag set if stream structure is initialised*/ + + ZPOS64_T offset_local_extrafield;/* offset of the local extra field */ + uInt size_local_extrafield;/* size of the local extra field */ + ZPOS64_T pos_local_extrafield; /* position in the local extra field in read*/ + ZPOS64_T total_out_64; + + uLong crc32; /* crc32 of all data uncompressed */ + uLong crc32_wait; /* crc32 we must obtain after decompress all */ + ZPOS64_T rest_read_compressed; /* number of byte to be decompressed */ + ZPOS64_T rest_read_uncompressed;/*number of byte to be obtained after decomp*/ + zlib_filefunc64_32_def z_filefunc; + voidpf filestream; /* io structore of the zipfile */ + uLong compression_method; /* compression method (0==store) */ + ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ + int raw; +} file_in_zip64_read_info_s; + + +/* unz64_s contain internal information about the zipfile +*/ +typedef struct +{ + zlib_filefunc64_32_def z_filefunc; + int is64bitOpenFunction; + voidpf filestream; /* io structore of the zipfile */ + unz_global_info64 gi; /* public global information */ + ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ + ZPOS64_T num_file; /* number of the current file in the zipfile*/ + ZPOS64_T pos_in_central_dir; /* pos of the current file in the central dir*/ + ZPOS64_T current_file_ok; /* flag about the usability of the current file*/ + ZPOS64_T central_pos; /* position of the beginning of the central dir*/ + + ZPOS64_T size_central_dir; /* size of the central directory */ + ZPOS64_T offset_central_dir; /* offset of start of central directory with + respect to the starting disk number */ + + unz_file_info64 cur_file_info; /* public info about the current file in zip*/ + unz_file_info64_internal cur_file_info_internal; /* private info about it*/ + file_in_zip64_read_info_s* pfile_in_zip_read; /* structure about the current + file if we are decompressing it */ + int encrypted; + + int isZip64; + +# ifndef NOUNCRYPT + unsigned long keys[3]; /* keys defining the pseudo-random sequence */ + const z_crc_t* pcrc_32_tab; +# endif +} unz64_s; + + +#ifndef NOUNCRYPT +#include "crypt.h" +#endif + +/* =========================================================================== + Read a byte from a gz_stream; update next_in and avail_in. Return EOF + for end of file. + IN assertion: the stream s has been successfully opened for reading. +*/ + + +local int unz64local_getByte OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + int *pi)); + +local int unz64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, int *pi) +{ + unsigned char c; + int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,&c,1); + if (err==1) + { + *pi = (int)c; + return UNZ_OK; + } + else + { + if (ZERROR64(*pzlib_filefunc_def,filestream)) + return UNZ_ERRNO; + else + return UNZ_EOF; + } +} + + +/* =========================================================================== + Reads a long in LSB order from the given gz_stream. 
Sets +*/ +local int unz64local_getShort OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + uLong *pX)); + +local int unz64local_getShort (const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + uLong *pX) +{ + uLong x ; + int i = 0; + int err; + + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x = (uLong)i; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((uLong)i)<<8; + + if (err==UNZ_OK) + *pX = x; + else + *pX = 0; + return err; +} + +local int unz64local_getLong OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + uLong *pX)); + +local int unz64local_getLong (const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + uLong *pX) +{ + uLong x ; + int i = 0; + int err; + + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x = (uLong)i; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((uLong)i)<<8; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((uLong)i)<<16; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((uLong)i)<<24; + + if (err==UNZ_OK) + *pX = x; + else + *pX = 0; + return err; +} + +local int unz64local_getLong64 OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + ZPOS64_T *pX)); + + +local int unz64local_getLong64 (const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + ZPOS64_T *pX) +{ + ZPOS64_T x ; + int i = 0; + int err; + + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x = (ZPOS64_T)i; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<8; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<16; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<24; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<32; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<40; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<48; + + if (err==UNZ_OK) + err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); + x |= ((ZPOS64_T)i)<<56; + + if (err==UNZ_OK) + *pX = x; + else + *pX = 0; + return err; +} + +/* My own strcmpi / strcasecmp */ +local int strcmpcasenosensitive_internal (const char* fileName1, const char* fileName2) +{ + for (;;) + { + char c1=*(fileName1++); + char c2=*(fileName2++); + if ((c1>='a') && (c1<='z')) + c1 -= 0x20; + if ((c2>='a') && (c2<='z')) + c2 -= 0x20; + if (c1=='\0') + return ((c2=='\0') ? 0 : -1); + if (c2=='\0') + return 1; + if (c1c2) + return 1; + } +} + + +#ifdef CASESENSITIVITYDEFAULT_NO +#define CASESENSITIVITYDEFAULTVALUE 2 +#else +#define CASESENSITIVITYDEFAULTVALUE 1 +#endif + +#ifndef STRCMPCASENOSENTIVEFUNCTION +#define STRCMPCASENOSENTIVEFUNCTION strcmpcasenosensitive_internal +#endif + +/* + Compare two filename (fileName1,fileName2). 
+ If iCaseSenisivity = 1, comparision is case sensitivity (like strcmp) + If iCaseSenisivity = 2, comparision is not case sensitivity (like strcmpi + or strcasecmp) + If iCaseSenisivity = 0, case sensitivity is defaut of your operating system + (like 1 on Unix, 2 on Windows) + +*/ +extern int ZEXPORT unzStringFileNameCompare (const char* fileName1, + const char* fileName2, + int iCaseSensitivity) + +{ + if (iCaseSensitivity==0) + iCaseSensitivity=CASESENSITIVITYDEFAULTVALUE; + + if (iCaseSensitivity==1) + return strcmp(fileName1,fileName2); + + return STRCMPCASENOSENTIVEFUNCTION(fileName1,fileName2); +} + +#ifndef BUFREADCOMMENT +#define BUFREADCOMMENT (0x400) +#endif + +/* + Locate the Central directory of a zipfile (at the end, just before + the global comment) +*/ +local ZPOS64_T unz64local_SearchCentralDir OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); +local ZPOS64_T unz64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) +{ + unsigned char* buf; + ZPOS64_T uSizeFile; + ZPOS64_T uBackRead; + ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */ + ZPOS64_T uPosFound=0; + + if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) + return 0; + + + uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream); + + if (uMaxBack>uSizeFile) + uMaxBack = uSizeFile; + + buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4); + if (buf==NULL) + return 0; + + uBackRead = 4; + while (uBackReaduMaxBack) + uBackRead = uMaxBack; + else + uBackRead+=BUFREADCOMMENT; + uReadPos = uSizeFile-uBackRead ; + + uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? + (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos); + if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) + break; + + if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) + break; + + for (i=(int)uReadSize-3; (i--)>0;) + if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && + ((*(buf+i+2))==0x05) && ((*(buf+i+3))==0x06)) + { + uPosFound = uReadPos+i; + break; + } + + if (uPosFound!=0) + break; + } + TRYFREE(buf); + return uPosFound; +} + + +/* + Locate the Central directory 64 of a zipfile (at the end, just before + the global comment) +*/ +local ZPOS64_T unz64local_SearchCentralDir64 OF(( + const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream)); + +local ZPOS64_T unz64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream) +{ + unsigned char* buf; + ZPOS64_T uSizeFile; + ZPOS64_T uBackRead; + ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */ + ZPOS64_T uPosFound=0; + uLong uL; + ZPOS64_T relativeOffset; + + if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) + return 0; + + + uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream); + + if (uMaxBack>uSizeFile) + uMaxBack = uSizeFile; + + buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4); + if (buf==NULL) + return 0; + + uBackRead = 4; + while (uBackReaduMaxBack) + uBackRead = uMaxBack; + else + uBackRead+=BUFREADCOMMENT; + uReadPos = uSizeFile-uBackRead ; + + uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? 
+ (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos); + if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) + break; + + if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) + break; + + for (i=(int)uReadSize-3; (i--)>0;) + if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && + ((*(buf+i+2))==0x06) && ((*(buf+i+3))==0x07)) + { + uPosFound = uReadPos+i; + break; + } + + if (uPosFound!=0) + break; + } + TRYFREE(buf); + if (uPosFound == 0) + return 0; + + /* Zip64 end of central directory locator */ + if (ZSEEK64(*pzlib_filefunc_def,filestream, uPosFound,ZLIB_FILEFUNC_SEEK_SET)!=0) + return 0; + + /* the signature, already checked */ + if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) + return 0; + + /* number of the disk with the start of the zip64 end of central directory */ + if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) + return 0; + if (uL != 0) + return 0; + + /* relative offset of the zip64 end of central directory record */ + if (unz64local_getLong64(pzlib_filefunc_def,filestream,&relativeOffset)!=UNZ_OK) + return 0; + + /* total number of disks */ + if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) + return 0; + if (uL != 1) + return 0; + + /* Goto end of central directory record */ + if (ZSEEK64(*pzlib_filefunc_def,filestream, relativeOffset,ZLIB_FILEFUNC_SEEK_SET)!=0) + return 0; + + /* the signature */ + if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) + return 0; + + if (uL != 0x06064b50) + return 0; + + return relativeOffset; +} + +/* + Open a Zip file. path contain the full pathname (by example, + on a Windows NT computer "c:\\test\\zlib114.zip" or on an Unix computer + "zlib/zlib114.zip". + If the zipfile cannot be opened (file doesn't exist or in not valid), the + return value is NULL. + Else, the return value is a unzFile Handle, usable with other function + of this unzip package. 
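+
+    A minimal usage sketch built on the public wrappers declared in unzip.h
+    (illustrative only; "archive.zip" is a placeholder path):
+
+        unzFile zf = unzOpen64("archive.zip");
+        if (zf != NULL)
+        {
+            ... iterate entries and read data ...
+            unzClose(zf);
+        }
+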
+*/ +local unzFile unzOpenInternal (const void *path, + zlib_filefunc64_32_def* pzlib_filefunc64_32_def, + int is64bitOpenFunction) +{ + unz64_s us; + unz64_s *s; + ZPOS64_T central_pos; + uLong uL; + + uLong number_disk; /* number of the current dist, used for + spaning ZIP, unsupported, always 0*/ + uLong number_disk_with_CD; /* number the the disk with central dir, used + for spaning ZIP, unsupported, always 0*/ + ZPOS64_T number_entry_CD; /* total number of entries in + the central dir + (same than number_entry on nospan) */ + + int err=UNZ_OK; + + if (unz_copyright[0]!=' ') + return NULL; + + us.z_filefunc.zseek32_file = NULL; + us.z_filefunc.ztell32_file = NULL; + if (pzlib_filefunc64_32_def==NULL) + fill_fopen64_filefunc(&us.z_filefunc.zfile_func64); + else + us.z_filefunc = *pzlib_filefunc64_32_def; + us.is64bitOpenFunction = is64bitOpenFunction; + + + + us.filestream = ZOPEN64(us.z_filefunc, + path, + ZLIB_FILEFUNC_MODE_READ | + ZLIB_FILEFUNC_MODE_EXISTING); + if (us.filestream==NULL) + return NULL; + + central_pos = unz64local_SearchCentralDir64(&us.z_filefunc,us.filestream); + if (central_pos) + { + uLong uS; + ZPOS64_T uL64; + + us.isZip64 = 1; + + if (ZSEEK64(us.z_filefunc, us.filestream, + central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0) + err=UNZ_ERRNO; + + /* the signature, already checked */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + + /* size of zip64 end of central directory record */ + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&uL64)!=UNZ_OK) + err=UNZ_ERRNO; + + /* version made by */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&uS)!=UNZ_OK) + err=UNZ_ERRNO; + + /* version needed to extract */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&uS)!=UNZ_OK) + err=UNZ_ERRNO; + + /* number of this disk */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&number_disk)!=UNZ_OK) + err=UNZ_ERRNO; + + /* number of the disk with the start of the central directory */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&number_disk_with_CD)!=UNZ_OK) + err=UNZ_ERRNO; + + /* total number of entries in the central directory on this disk */ + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&us.gi.number_entry)!=UNZ_OK) + err=UNZ_ERRNO; + + /* total number of entries in the central directory */ + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&number_entry_CD)!=UNZ_OK) + err=UNZ_ERRNO; + + if ((number_entry_CD!=us.gi.number_entry) || + (number_disk_with_CD!=0) || + (number_disk!=0)) + err=UNZ_BADZIPFILE; + + /* size of the central directory */ + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&us.size_central_dir)!=UNZ_OK) + err=UNZ_ERRNO; + + /* offset of start of central directory with respect to the + starting disk number */ + if (unz64local_getLong64(&us.z_filefunc, us.filestream,&us.offset_central_dir)!=UNZ_OK) + err=UNZ_ERRNO; + + us.gi.size_comment = 0; + } + else + { + central_pos = unz64local_SearchCentralDir(&us.z_filefunc,us.filestream); + if (central_pos==0) + err=UNZ_ERRNO; + + us.isZip64 = 0; + + if (ZSEEK64(us.z_filefunc, us.filestream, + central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0) + err=UNZ_ERRNO; + + /* the signature, already checked */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + + /* number of this disk */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&number_disk)!=UNZ_OK) + err=UNZ_ERRNO; + + /* number of the disk with the start of the central directory */ + if (unz64local_getShort(&us.z_filefunc, 
us.filestream,&number_disk_with_CD)!=UNZ_OK) + err=UNZ_ERRNO; + + /* total number of entries in the central dir on this disk */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + us.gi.number_entry = uL; + + /* total number of entries in the central dir */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + number_entry_CD = uL; + + if ((number_entry_CD!=us.gi.number_entry) || + (number_disk_with_CD!=0) || + (number_disk!=0)) + err=UNZ_BADZIPFILE; + + /* size of the central directory */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + us.size_central_dir = uL; + + /* offset of start of central directory with respect to the + starting disk number */ + if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK) + err=UNZ_ERRNO; + us.offset_central_dir = uL; + + /* zipfile comment length */ + if (unz64local_getShort(&us.z_filefunc, us.filestream,&us.gi.size_comment)!=UNZ_OK) + err=UNZ_ERRNO; + } + + if ((central_pospfile_in_zip_read!=NULL) + unzCloseCurrentFile(file); + + ZCLOSE64(s->z_filefunc, s->filestream); + TRYFREE(s); + return UNZ_OK; +} + + +/* + Write info about the ZipFile in the *pglobal_info structure. + No preparation of the structure is needed + return UNZ_OK if there is no problem. */ +extern int ZEXPORT unzGetGlobalInfo64 (unzFile file, unz_global_info64* pglobal_info) +{ + unz64_s* s; + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + *pglobal_info=s->gi; + return UNZ_OK; +} + +extern int ZEXPORT unzGetGlobalInfo (unzFile file, unz_global_info* pglobal_info32) +{ + unz64_s* s; + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + /* to do : check if number_entry is not truncated */ + pglobal_info32->number_entry = (uLong)s->gi.number_entry; + pglobal_info32->size_comment = s->gi.size_comment; + return UNZ_OK; +} +/* + Translate date/time from Dos format to tm_unz (readable more easilty) +*/ +local void unz64local_DosDateToTmuDate (ZPOS64_T ulDosDate, tm_unz* ptm) +{ + ZPOS64_T uDate; + uDate = (ZPOS64_T)(ulDosDate>>16); + ptm->tm_mday = (uInt)(uDate&0x1f) ; + ptm->tm_mon = (uInt)((((uDate)&0x1E0)/0x20)-1) ; + ptm->tm_year = (uInt)(((uDate&0x0FE00)/0x0200)+1980) ; + + ptm->tm_hour = (uInt) ((ulDosDate &0xF800)/0x800); + ptm->tm_min = (uInt) ((ulDosDate&0x7E0)/0x20) ; + ptm->tm_sec = (uInt) (2*(ulDosDate&0x1f)) ; +} + +/* + Get Info about the current file in the zipfile, with internal only info +*/ +local int unz64local_GetCurrentFileInfoInternal OF((unzFile file, + unz_file_info64 *pfile_info, + unz_file_info64_internal + *pfile_info_internal, + char *szFileName, + uLong fileNameBufferSize, + void *extraField, + uLong extraFieldBufferSize, + char *szComment, + uLong commentBufferSize)); + +local int unz64local_GetCurrentFileInfoInternal (unzFile file, + unz_file_info64 *pfile_info, + unz_file_info64_internal + *pfile_info_internal, + char *szFileName, + uLong fileNameBufferSize, + void *extraField, + uLong extraFieldBufferSize, + char *szComment, + uLong commentBufferSize) +{ + unz64_s* s; + unz_file_info64 file_info; + unz_file_info64_internal file_info_internal; + int err=UNZ_OK; + uLong uMagic; + long lSeek=0; + uLong uL; + + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + if (ZSEEK64(s->z_filefunc, s->filestream, + s->pos_in_central_dir+s->byte_before_the_zipfile, + ZLIB_FILEFUNC_SEEK_SET)!=0) + err=UNZ_ERRNO; + + + /* we check the magic */ + if (err==UNZ_OK) + { + if (unz64local_getLong(&s->z_filefunc, 
s->filestream,&uMagic) != UNZ_OK) + err=UNZ_ERRNO; + else if (uMagic!=0x02014b50) + err=UNZ_BADZIPFILE; + } + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.version) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.version_needed) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.flag) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.compression_method) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.dosDate) != UNZ_OK) + err=UNZ_ERRNO; + + unz64local_DosDateToTmuDate(file_info.dosDate,&file_info.tmu_date); + + if (unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.crc) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK) + err=UNZ_ERRNO; + file_info.compressed_size = uL; + + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK) + err=UNZ_ERRNO; + file_info.uncompressed_size = uL; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.size_filename) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.size_file_extra) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.size_file_comment) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.disk_num_start) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.internal_fa) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.external_fa) != UNZ_OK) + err=UNZ_ERRNO; + + /* relative offset of local header */ + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK) + err=UNZ_ERRNO; + file_info_internal.offset_curfile = uL; + + lSeek+=file_info.size_filename; + if ((err==UNZ_OK) && (szFileName!=NULL)) + { + uLong uSizeRead ; + if (file_info.size_filename0) && (fileNameBufferSize>0)) + if (ZREAD64(s->z_filefunc, s->filestream,szFileName,uSizeRead)!=uSizeRead) + err=UNZ_ERRNO; + lSeek -= uSizeRead; + } + + /* Read extrafield */ + if ((err==UNZ_OK) && (extraField!=NULL)) + { + ZPOS64_T uSizeRead ; + if (file_info.size_file_extraz_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) + lSeek=0; + else + err=UNZ_ERRNO; + } + + if ((file_info.size_file_extra>0) && (extraFieldBufferSize>0)) + if (ZREAD64(s->z_filefunc, s->filestream,extraField,(uLong)uSizeRead)!=uSizeRead) + err=UNZ_ERRNO; + + lSeek += file_info.size_file_extra - (uLong)uSizeRead; + } + else + lSeek += file_info.size_file_extra; + + + if ((err==UNZ_OK) && (file_info.size_file_extra != 0)) + { + uLong acc = 0; + + /* since lSeek now points to after the extra field we need to move back */ + lSeek -= file_info.size_file_extra; + + if (lSeek!=0) + { + if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) + lSeek=0; + else + err=UNZ_ERRNO; + } + + while(acc < file_info.size_file_extra) + { + uLong headerId; + uLong dataSize; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&headerId) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&dataSize) != UNZ_OK) + err=UNZ_ERRNO; + + /* ZIP64 extra fields */ + if (headerId == 0x0001) + { + uLong uL; + + if(file_info.uncompressed_size == MAXU32) + { + if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info.uncompressed_size) != UNZ_OK) + err=UNZ_ERRNO; 
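+                  /* a 0xFFFFFFFF (MAXU32) value in the central directory is only a
+                     placeholder; the real 64-bit size is carried by this ZIP64
+                     (header id 0x0001) extra field */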
+ } + + if(file_info.compressed_size == MAXU32) + { + if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info.compressed_size) != UNZ_OK) + err=UNZ_ERRNO; + } + + if(file_info_internal.offset_curfile == MAXU32) + { + /* Relative Header offset */ + if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info_internal.offset_curfile) != UNZ_OK) + err=UNZ_ERRNO; + } + + if(file_info.disk_num_start == MAXU32) + { + /* Disk Start Number */ + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK) + err=UNZ_ERRNO; + } + + } + else + { + if (ZSEEK64(s->z_filefunc, s->filestream,dataSize,ZLIB_FILEFUNC_SEEK_CUR)!=0) + err=UNZ_ERRNO; + } + + acc += 2 + 2 + dataSize; + } + } + + if ((err==UNZ_OK) && (szComment!=NULL)) + { + uLong uSizeRead ; + if (file_info.size_file_commentz_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) + lSeek=0; + else + err=UNZ_ERRNO; + } + + if ((file_info.size_file_comment>0) && (commentBufferSize>0)) + if (ZREAD64(s->z_filefunc, s->filestream,szComment,uSizeRead)!=uSizeRead) + err=UNZ_ERRNO; + lSeek+=file_info.size_file_comment - uSizeRead; + } + else + lSeek+=file_info.size_file_comment; + + + if ((err==UNZ_OK) && (pfile_info!=NULL)) + *pfile_info=file_info; + + if ((err==UNZ_OK) && (pfile_info_internal!=NULL)) + *pfile_info_internal=file_info_internal; + + return err; +} + + + +/* + Write info about the ZipFile in the *pglobal_info structure. + No preparation of the structure is needed + return UNZ_OK if there is no problem. +*/ +extern int ZEXPORT unzGetCurrentFileInfo64 (unzFile file, + unz_file_info64 * pfile_info, + char * szFileName, uLong fileNameBufferSize, + void *extraField, uLong extraFieldBufferSize, + char* szComment, uLong commentBufferSize) +{ + return unz64local_GetCurrentFileInfoInternal(file,pfile_info,NULL, + szFileName,fileNameBufferSize, + extraField,extraFieldBufferSize, + szComment,commentBufferSize); +} + +extern int ZEXPORT unzGetCurrentFileInfo (unzFile file, + unz_file_info * pfile_info, + char * szFileName, uLong fileNameBufferSize, + void *extraField, uLong extraFieldBufferSize, + char* szComment, uLong commentBufferSize) +{ + int err; + unz_file_info64 file_info64; + err = unz64local_GetCurrentFileInfoInternal(file,&file_info64,NULL, + szFileName,fileNameBufferSize, + extraField,extraFieldBufferSize, + szComment,commentBufferSize); + if ((err==UNZ_OK) && (pfile_info != NULL)) + { + pfile_info->version = file_info64.version; + pfile_info->version_needed = file_info64.version_needed; + pfile_info->flag = file_info64.flag; + pfile_info->compression_method = file_info64.compression_method; + pfile_info->dosDate = file_info64.dosDate; + pfile_info->crc = file_info64.crc; + + pfile_info->size_filename = file_info64.size_filename; + pfile_info->size_file_extra = file_info64.size_file_extra; + pfile_info->size_file_comment = file_info64.size_file_comment; + + pfile_info->disk_num_start = file_info64.disk_num_start; + pfile_info->internal_fa = file_info64.internal_fa; + pfile_info->external_fa = file_info64.external_fa; + + pfile_info->tmu_date = file_info64.tmu_date, + + + pfile_info->compressed_size = (uLong)file_info64.compressed_size; + pfile_info->uncompressed_size = (uLong)file_info64.uncompressed_size; + + } + return err; +} +/* + Set the current file of the zipfile to the first file. 
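+
+  Together with unzGoToNextFile and unzGetCurrentFileInfo64 this is the usual
+  way to walk the central directory; a rough sketch (error handling trimmed,
+  zf is an archive opened with unzOpen64):
+
+      int err = unzGoToFirstFile(zf);
+      while (err == UNZ_OK)
+      {
+          char name[256];
+          unz_file_info64 info;
+          unzGetCurrentFileInfo64(zf, &info, name, sizeof(name), NULL, 0, NULL, 0);
+          ... use name and info ...
+          err = unzGoToNextFile(zf);
+      }
+
+  unzGoToNextFile returns UNZ_END_OF_LIST_OF_FILE once the last entry has been
+  visited, which ends the loop cleanly.
+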
+ return UNZ_OK if there is no problem +*/ +extern int ZEXPORT unzGoToFirstFile (unzFile file) +{ + int err=UNZ_OK; + unz64_s* s; + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + s->pos_in_central_dir=s->offset_central_dir; + s->num_file=0; + err=unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info, + &s->cur_file_info_internal, + NULL,0,NULL,0,NULL,0); + s->current_file_ok = (err == UNZ_OK); + return err; +} + +/* + Set the current file of the zipfile to the next file. + return UNZ_OK if there is no problem + return UNZ_END_OF_LIST_OF_FILE if the actual file was the latest. +*/ +extern int ZEXPORT unzGoToNextFile (unzFile file) +{ + unz64_s* s; + int err; + + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + if (!s->current_file_ok) + return UNZ_END_OF_LIST_OF_FILE; + if (s->gi.number_entry != 0xffff) /* 2^16 files overflow hack */ + if (s->num_file+1==s->gi.number_entry) + return UNZ_END_OF_LIST_OF_FILE; + + s->pos_in_central_dir += SIZECENTRALDIRITEM + s->cur_file_info.size_filename + + s->cur_file_info.size_file_extra + s->cur_file_info.size_file_comment ; + s->num_file++; + err = unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info, + &s->cur_file_info_internal, + NULL,0,NULL,0,NULL,0); + s->current_file_ok = (err == UNZ_OK); + return err; +} + + +/* + Try locate the file szFileName in the zipfile. + For the iCaseSensitivity signification, see unzStringFileNameCompare + + return value : + UNZ_OK if the file is found. It becomes the current file. + UNZ_END_OF_LIST_OF_FILE if the file is not found +*/ +extern int ZEXPORT unzLocateFile (unzFile file, const char *szFileName, int iCaseSensitivity) +{ + unz64_s* s; + int err; + + /* We remember the 'current' position in the file so that we can jump + * back there if we fail. + */ + unz_file_info64 cur_file_infoSaved; + unz_file_info64_internal cur_file_info_internalSaved; + ZPOS64_T num_fileSaved; + ZPOS64_T pos_in_central_dirSaved; + + + if (file==NULL) + return UNZ_PARAMERROR; + + if (strlen(szFileName)>=UNZ_MAXFILENAMEINZIP) + return UNZ_PARAMERROR; + + s=(unz64_s*)file; + if (!s->current_file_ok) + return UNZ_END_OF_LIST_OF_FILE; + + /* Save the current state */ + num_fileSaved = s->num_file; + pos_in_central_dirSaved = s->pos_in_central_dir; + cur_file_infoSaved = s->cur_file_info; + cur_file_info_internalSaved = s->cur_file_info_internal; + + err = unzGoToFirstFile(file); + + while (err == UNZ_OK) + { + char szCurrentFileName[UNZ_MAXFILENAMEINZIP+1]; + err = unzGetCurrentFileInfo64(file,NULL, + szCurrentFileName,sizeof(szCurrentFileName)-1, + NULL,0,NULL,0); + if (err == UNZ_OK) + { + if (unzStringFileNameCompare(szCurrentFileName, + szFileName,iCaseSensitivity)==0) + return UNZ_OK; + err = unzGoToNextFile(file); + } + } + + /* We failed, so restore the state of the 'current file' to where we + * were. + */ + s->num_file = num_fileSaved ; + s->pos_in_central_dir = pos_in_central_dirSaved ; + s->cur_file_info = cur_file_infoSaved; + s->cur_file_info_internal = cur_file_info_internalSaved; + return err; +} + + +/* +/////////////////////////////////////////// +// Contributed by Ryan Haksi (mailto://cryogen@infoserve.net) +// I need random access +// +// Further optimization could be realized by adding an ability +// to cache the directory in memory. The goal being a single +// comprehensive file read to put the file I need in a memory. 
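+//
+// A small sketch of the save/restore pattern these helpers enable (zf is an
+// open unzFile; names are illustrative):
+//
+//   unz64_file_pos pos;
+//   unzGetFilePos64(zf, &pos);      remember the current entry
+//   ... move elsewhere in the archive ...
+//   unzGoToFilePos64(zf, &pos);     jump straight back to it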
+*/ + +/* +typedef struct unz_file_pos_s +{ + ZPOS64_T pos_in_zip_directory; // offset in file + ZPOS64_T num_of_file; // # of file +} unz_file_pos; +*/ + +extern int ZEXPORT unzGetFilePos64(unzFile file, unz64_file_pos* file_pos) +{ + unz64_s* s; + + if (file==NULL || file_pos==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + if (!s->current_file_ok) + return UNZ_END_OF_LIST_OF_FILE; + + file_pos->pos_in_zip_directory = s->pos_in_central_dir; + file_pos->num_of_file = s->num_file; + + return UNZ_OK; +} + +extern int ZEXPORT unzGetFilePos( + unzFile file, + unz_file_pos* file_pos) +{ + unz64_file_pos file_pos64; + int err = unzGetFilePos64(file,&file_pos64); + if (err==UNZ_OK) + { + file_pos->pos_in_zip_directory = (uLong)file_pos64.pos_in_zip_directory; + file_pos->num_of_file = (uLong)file_pos64.num_of_file; + } + return err; +} + +extern int ZEXPORT unzGoToFilePos64(unzFile file, const unz64_file_pos* file_pos) +{ + unz64_s* s; + int err; + + if (file==NULL || file_pos==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + + /* jump to the right spot */ + s->pos_in_central_dir = file_pos->pos_in_zip_directory; + s->num_file = file_pos->num_of_file; + + /* set the current file */ + err = unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info, + &s->cur_file_info_internal, + NULL,0,NULL,0,NULL,0); + /* return results */ + s->current_file_ok = (err == UNZ_OK); + return err; +} + +extern int ZEXPORT unzGoToFilePos( + unzFile file, + unz_file_pos* file_pos) +{ + unz64_file_pos file_pos64; + if (file_pos == NULL) + return UNZ_PARAMERROR; + + file_pos64.pos_in_zip_directory = file_pos->pos_in_zip_directory; + file_pos64.num_of_file = file_pos->num_of_file; + return unzGoToFilePos64(file,&file_pos64); +} + +/* +// Unzip Helper Functions - should be here? 
+/////////////////////////////////////////// +*/ + +/* + Read the local header of the current zipfile + Check the coherency of the local header and info in the end of central + directory about this file + store in *piSizeVar the size of extra info in local header + (filename and size of extra field data) +*/ +local int unz64local_CheckCurrentFileCoherencyHeader (unz64_s* s, uInt* piSizeVar, + ZPOS64_T * poffset_local_extrafield, + uInt * psize_local_extrafield) +{ + uLong uMagic,uData,uFlags; + uLong size_filename; + uLong size_extra_field; + int err=UNZ_OK; + + *piSizeVar = 0; + *poffset_local_extrafield = 0; + *psize_local_extrafield = 0; + + if (ZSEEK64(s->z_filefunc, s->filestream,s->cur_file_info_internal.offset_curfile + + s->byte_before_the_zipfile,ZLIB_FILEFUNC_SEEK_SET)!=0) + return UNZ_ERRNO; + + + if (err==UNZ_OK) + { + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uMagic) != UNZ_OK) + err=UNZ_ERRNO; + else if (uMagic!=0x04034b50) + err=UNZ_BADZIPFILE; + } + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) + err=UNZ_ERRNO; +/* + else if ((err==UNZ_OK) && (uData!=s->cur_file_info.wVersion)) + err=UNZ_BADZIPFILE; +*/ + if (unz64local_getShort(&s->z_filefunc, s->filestream,&uFlags) != UNZ_OK) + err=UNZ_ERRNO; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) + err=UNZ_ERRNO; + else if ((err==UNZ_OK) && (uData!=s->cur_file_info.compression_method)) + err=UNZ_BADZIPFILE; + + if ((err==UNZ_OK) && (s->cur_file_info.compression_method!=0) && +/* #ifdef HAVE_BZIP2 */ + (s->cur_file_info.compression_method!=Z_BZIP2ED) && +/* #endif */ + (s->cur_file_info.compression_method!=Z_DEFLATED)) + err=UNZ_BADZIPFILE; + + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* date/time */ + err=UNZ_ERRNO; + + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* crc */ + err=UNZ_ERRNO; + else if ((err==UNZ_OK) && (uData!=s->cur_file_info.crc) && ((uFlags & 8)==0)) + err=UNZ_BADZIPFILE; + + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* size compr */ + err=UNZ_ERRNO; + else if (uData != 0xFFFFFFFF && (err==UNZ_OK) && (uData!=s->cur_file_info.compressed_size) && ((uFlags & 8)==0)) + err=UNZ_BADZIPFILE; + + if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* size uncompr */ + err=UNZ_ERRNO; + else if (uData != 0xFFFFFFFF && (err==UNZ_OK) && (uData!=s->cur_file_info.uncompressed_size) && ((uFlags & 8)==0)) + err=UNZ_BADZIPFILE; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&size_filename) != UNZ_OK) + err=UNZ_ERRNO; + else if ((err==UNZ_OK) && (size_filename!=s->cur_file_info.size_filename)) + err=UNZ_BADZIPFILE; + + *piSizeVar += (uInt)size_filename; + + if (unz64local_getShort(&s->z_filefunc, s->filestream,&size_extra_field) != UNZ_OK) + err=UNZ_ERRNO; + *poffset_local_extrafield= s->cur_file_info_internal.offset_curfile + + SIZEZIPLOCALHEADER + size_filename; + *psize_local_extrafield = (uInt)size_extra_field; + + *piSizeVar += (uInt)size_extra_field; + + return err; +} + +/* + Open for reading data the current file in the zipfile. + If there is no error and the file is opened, the return value is UNZ_OK. 
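+  When method and level are non-NULL they receive the entry's compression
+  method and a compression level estimated from the general-purpose flag bits;
+  raw!=0 returns the stored bytes without decompressing them, and password may
+  be NULL for entries that are not encrypted.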
+*/ +extern int ZEXPORT unzOpenCurrentFile3 (unzFile file, int* method, + int* level, int raw, const char* password) +{ + int err=UNZ_OK; + uInt iSizeVar; + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + ZPOS64_T offset_local_extrafield; /* offset of the local extra field */ + uInt size_local_extrafield; /* size of the local extra field */ +# ifndef NOUNCRYPT + char source[12]; +# else + if (password != NULL) + return UNZ_PARAMERROR; +# endif + + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + if (!s->current_file_ok) + return UNZ_PARAMERROR; + + if (s->pfile_in_zip_read != NULL) + unzCloseCurrentFile(file); + + if (unz64local_CheckCurrentFileCoherencyHeader(s,&iSizeVar, &offset_local_extrafield,&size_local_extrafield)!=UNZ_OK) + return UNZ_BADZIPFILE; + + pfile_in_zip_read_info = (file_in_zip64_read_info_s*)ALLOC(sizeof(file_in_zip64_read_info_s)); + if (pfile_in_zip_read_info==NULL) + return UNZ_INTERNALERROR; + + pfile_in_zip_read_info->read_buffer=(char*)ALLOC(UNZ_BUFSIZE); + pfile_in_zip_read_info->offset_local_extrafield = offset_local_extrafield; + pfile_in_zip_read_info->size_local_extrafield = size_local_extrafield; + pfile_in_zip_read_info->pos_local_extrafield=0; + pfile_in_zip_read_info->raw=raw; + + if (pfile_in_zip_read_info->read_buffer==NULL) + { + TRYFREE(pfile_in_zip_read_info); + return UNZ_INTERNALERROR; + } + + pfile_in_zip_read_info->stream_initialised=0; + + if (method!=NULL) + *method = (int)s->cur_file_info.compression_method; + + if (level!=NULL) + { + *level = 6; + switch (s->cur_file_info.flag & 0x06) + { + case 6 : *level = 1; break; + case 4 : *level = 2; break; + case 2 : *level = 9; break; + } + } + + if ((s->cur_file_info.compression_method!=0) && +/* #ifdef HAVE_BZIP2 */ + (s->cur_file_info.compression_method!=Z_BZIP2ED) && +/* #endif */ + (s->cur_file_info.compression_method!=Z_DEFLATED)) + + err=UNZ_BADZIPFILE; + + pfile_in_zip_read_info->crc32_wait=s->cur_file_info.crc; + pfile_in_zip_read_info->crc32=0; + pfile_in_zip_read_info->total_out_64=0; + pfile_in_zip_read_info->compression_method = s->cur_file_info.compression_method; + pfile_in_zip_read_info->filestream=s->filestream; + pfile_in_zip_read_info->z_filefunc=s->z_filefunc; + pfile_in_zip_read_info->byte_before_the_zipfile=s->byte_before_the_zipfile; + + pfile_in_zip_read_info->stream.total_out = 0; + + if ((s->cur_file_info.compression_method==Z_BZIP2ED) && (!raw)) + { +#ifdef HAVE_BZIP2 + pfile_in_zip_read_info->bstream.bzalloc = (void *(*) (void *, int, int))0; + pfile_in_zip_read_info->bstream.bzfree = (free_func)0; + pfile_in_zip_read_info->bstream.opaque = (voidpf)0; + pfile_in_zip_read_info->bstream.state = (voidpf)0; + + pfile_in_zip_read_info->stream.zalloc = (alloc_func)0; + pfile_in_zip_read_info->stream.zfree = (free_func)0; + pfile_in_zip_read_info->stream.opaque = (voidpf)0; + pfile_in_zip_read_info->stream.next_in = (voidpf)0; + pfile_in_zip_read_info->stream.avail_in = 0; + + err=BZ2_bzDecompressInit(&pfile_in_zip_read_info->bstream, 0, 0); + if (err == Z_OK) + pfile_in_zip_read_info->stream_initialised=Z_BZIP2ED; + else + { + TRYFREE(pfile_in_zip_read_info); + return err; + } +#else + pfile_in_zip_read_info->raw=1; +#endif + } + else if ((s->cur_file_info.compression_method==Z_DEFLATED) && (!raw)) + { + pfile_in_zip_read_info->stream.zalloc = (alloc_func)0; + pfile_in_zip_read_info->stream.zfree = (free_func)0; + pfile_in_zip_read_info->stream.opaque = (voidpf)0; + pfile_in_zip_read_info->stream.next_in = 0; + 
pfile_in_zip_read_info->stream.avail_in = 0; + + err=inflateInit2(&pfile_in_zip_read_info->stream, -MAX_WBITS); + if (err == Z_OK) + pfile_in_zip_read_info->stream_initialised=Z_DEFLATED; + else + { + TRYFREE(pfile_in_zip_read_info); + return err; + } + /* windowBits is passed < 0 to tell that there is no zlib header. + * Note that in this case inflate *requires* an extra "dummy" byte + * after the compressed stream in order to complete decompression and + * return Z_STREAM_END. + * In unzip, i don't wait absolutely Z_STREAM_END because I known the + * size of both compressed and uncompressed data + */ + } + pfile_in_zip_read_info->rest_read_compressed = + s->cur_file_info.compressed_size ; + pfile_in_zip_read_info->rest_read_uncompressed = + s->cur_file_info.uncompressed_size ; + + + pfile_in_zip_read_info->pos_in_zipfile = + s->cur_file_info_internal.offset_curfile + SIZEZIPLOCALHEADER + + iSizeVar; + + pfile_in_zip_read_info->stream.avail_in = (uInt)0; + + s->pfile_in_zip_read = pfile_in_zip_read_info; + s->encrypted = 0; + +# ifndef NOUNCRYPT + if (password != NULL) + { + int i; + s->pcrc_32_tab = get_crc_table(); + init_keys(password,s->keys,s->pcrc_32_tab); + if (ZSEEK64(s->z_filefunc, s->filestream, + s->pfile_in_zip_read->pos_in_zipfile + + s->pfile_in_zip_read->byte_before_the_zipfile, + SEEK_SET)!=0) + return UNZ_INTERNALERROR; + if(ZREAD64(s->z_filefunc, s->filestream,source, 12)<12) + return UNZ_INTERNALERROR; + + for (i = 0; i<12; i++) + zdecode(s->keys,s->pcrc_32_tab,source[i]); + + s->pfile_in_zip_read->pos_in_zipfile+=12; + s->encrypted=1; + } +# endif + + + return UNZ_OK; +} + +extern int ZEXPORT unzOpenCurrentFile (unzFile file) +{ + return unzOpenCurrentFile3(file, NULL, NULL, 0, NULL); +} + +extern int ZEXPORT unzOpenCurrentFilePassword (unzFile file, const char* password) +{ + return unzOpenCurrentFile3(file, NULL, NULL, 0, password); +} + +extern int ZEXPORT unzOpenCurrentFile2 (unzFile file, int* method, int* level, int raw) +{ + return unzOpenCurrentFile3(file, method, level, raw, NULL); +} + +/** Addition for GDAL : START */ + +extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64( unzFile file) +{ + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + s=(unz64_s*)file; + if (file==NULL) + return 0; /* UNZ_PARAMERROR; */ + pfile_in_zip_read_info=s->pfile_in_zip_read; + if (pfile_in_zip_read_info==NULL) + return 0; /* UNZ_PARAMERROR; */ + return pfile_in_zip_read_info->pos_in_zipfile + + pfile_in_zip_read_info->byte_before_the_zipfile; +} + +/** Addition for GDAL : END */ + +/* + Read bytes from the current file. + buf contain buffer where data must be copied + len the size of buf. 
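+
+  A typical read loop, sketched under the assumption that zf is an open
+  unzFile whose current entry has been opened with unzOpenCurrentFile and
+  that out is some destination FILE*:
+
+      char buffer[4096];
+      int n;
+      while ((n = unzReadCurrentFile(zf, buffer, sizeof(buffer))) > 0)
+          fwrite(buffer, 1, (size_t)n, out);
+
+  A negative n signals an error and a zero n the end of the entry, as
+  described below.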
+ + return the number of byte copied if somes bytes are copied + return 0 if the end of file was reached + return <0 with error code if there is an error + (UNZ_ERRNO for IO error, or zLib error for uncompress error) +*/ +extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len) +{ + int err=UNZ_OK; + uInt iRead = 0; + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + pfile_in_zip_read_info=s->pfile_in_zip_read; + + if (pfile_in_zip_read_info==NULL) + return UNZ_PARAMERROR; + + + if (pfile_in_zip_read_info->read_buffer == NULL) + return UNZ_END_OF_LIST_OF_FILE; + if (len==0) + return 0; + + pfile_in_zip_read_info->stream.next_out = (Bytef*)buf; + + pfile_in_zip_read_info->stream.avail_out = (uInt)len; + + if ((len>pfile_in_zip_read_info->rest_read_uncompressed) && + (!(pfile_in_zip_read_info->raw))) + pfile_in_zip_read_info->stream.avail_out = + (uInt)pfile_in_zip_read_info->rest_read_uncompressed; + + if ((len>pfile_in_zip_read_info->rest_read_compressed+ + pfile_in_zip_read_info->stream.avail_in) && + (pfile_in_zip_read_info->raw)) + pfile_in_zip_read_info->stream.avail_out = + (uInt)pfile_in_zip_read_info->rest_read_compressed+ + pfile_in_zip_read_info->stream.avail_in; + + while (pfile_in_zip_read_info->stream.avail_out>0) + { + if ((pfile_in_zip_read_info->stream.avail_in==0) && + (pfile_in_zip_read_info->rest_read_compressed>0)) + { + uInt uReadThis = UNZ_BUFSIZE; + if (pfile_in_zip_read_info->rest_read_compressedrest_read_compressed; + if (uReadThis == 0) + return UNZ_EOF; + if (ZSEEK64(pfile_in_zip_read_info->z_filefunc, + pfile_in_zip_read_info->filestream, + pfile_in_zip_read_info->pos_in_zipfile + + pfile_in_zip_read_info->byte_before_the_zipfile, + ZLIB_FILEFUNC_SEEK_SET)!=0) + return UNZ_ERRNO; + if (ZREAD64(pfile_in_zip_read_info->z_filefunc, + pfile_in_zip_read_info->filestream, + pfile_in_zip_read_info->read_buffer, + uReadThis)!=uReadThis) + return UNZ_ERRNO; + + +# ifndef NOUNCRYPT + if(s->encrypted) + { + uInt i; + for(i=0;iread_buffer[i] = + zdecode(s->keys,s->pcrc_32_tab, + pfile_in_zip_read_info->read_buffer[i]); + } +# endif + + + pfile_in_zip_read_info->pos_in_zipfile += uReadThis; + + pfile_in_zip_read_info->rest_read_compressed-=uReadThis; + + pfile_in_zip_read_info->stream.next_in = + (Bytef*)pfile_in_zip_read_info->read_buffer; + pfile_in_zip_read_info->stream.avail_in = (uInt)uReadThis; + } + + if ((pfile_in_zip_read_info->compression_method==0) || (pfile_in_zip_read_info->raw)) + { + uInt uDoCopy,i ; + + if ((pfile_in_zip_read_info->stream.avail_in == 0) && + (pfile_in_zip_read_info->rest_read_compressed == 0)) + return (iRead==0) ? 
UNZ_EOF : iRead; + + if (pfile_in_zip_read_info->stream.avail_out < + pfile_in_zip_read_info->stream.avail_in) + uDoCopy = pfile_in_zip_read_info->stream.avail_out ; + else + uDoCopy = pfile_in_zip_read_info->stream.avail_in ; + + for (i=0;istream.next_out+i) = + *(pfile_in_zip_read_info->stream.next_in+i); + + pfile_in_zip_read_info->total_out_64 = pfile_in_zip_read_info->total_out_64 + uDoCopy; + + pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32, + pfile_in_zip_read_info->stream.next_out, + uDoCopy); + pfile_in_zip_read_info->rest_read_uncompressed-=uDoCopy; + pfile_in_zip_read_info->stream.avail_in -= uDoCopy; + pfile_in_zip_read_info->stream.avail_out -= uDoCopy; + pfile_in_zip_read_info->stream.next_out += uDoCopy; + pfile_in_zip_read_info->stream.next_in += uDoCopy; + pfile_in_zip_read_info->stream.total_out += uDoCopy; + iRead += uDoCopy; + } + else if (pfile_in_zip_read_info->compression_method==Z_BZIP2ED) + { +#ifdef HAVE_BZIP2 + uLong uTotalOutBefore,uTotalOutAfter; + const Bytef *bufBefore; + uLong uOutThis; + + pfile_in_zip_read_info->bstream.next_in = (char*)pfile_in_zip_read_info->stream.next_in; + pfile_in_zip_read_info->bstream.avail_in = pfile_in_zip_read_info->stream.avail_in; + pfile_in_zip_read_info->bstream.total_in_lo32 = pfile_in_zip_read_info->stream.total_in; + pfile_in_zip_read_info->bstream.total_in_hi32 = 0; + pfile_in_zip_read_info->bstream.next_out = (char*)pfile_in_zip_read_info->stream.next_out; + pfile_in_zip_read_info->bstream.avail_out = pfile_in_zip_read_info->stream.avail_out; + pfile_in_zip_read_info->bstream.total_out_lo32 = pfile_in_zip_read_info->stream.total_out; + pfile_in_zip_read_info->bstream.total_out_hi32 = 0; + + uTotalOutBefore = pfile_in_zip_read_info->bstream.total_out_lo32; + bufBefore = (const Bytef *)pfile_in_zip_read_info->bstream.next_out; + + err=BZ2_bzDecompress(&pfile_in_zip_read_info->bstream); + + uTotalOutAfter = pfile_in_zip_read_info->bstream.total_out_lo32; + uOutThis = uTotalOutAfter-uTotalOutBefore; + + pfile_in_zip_read_info->total_out_64 = pfile_in_zip_read_info->total_out_64 + uOutThis; + + pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32,bufBefore, (uInt)(uOutThis)); + pfile_in_zip_read_info->rest_read_uncompressed -= uOutThis; + iRead += (uInt)(uTotalOutAfter - uTotalOutBefore); + + pfile_in_zip_read_info->stream.next_in = (Bytef*)pfile_in_zip_read_info->bstream.next_in; + pfile_in_zip_read_info->stream.avail_in = pfile_in_zip_read_info->bstream.avail_in; + pfile_in_zip_read_info->stream.total_in = pfile_in_zip_read_info->bstream.total_in_lo32; + pfile_in_zip_read_info->stream.next_out = (Bytef*)pfile_in_zip_read_info->bstream.next_out; + pfile_in_zip_read_info->stream.avail_out = pfile_in_zip_read_info->bstream.avail_out; + pfile_in_zip_read_info->stream.total_out = pfile_in_zip_read_info->bstream.total_out_lo32; + + if (err==BZ_STREAM_END) + return (iRead==0) ? 
UNZ_EOF : iRead; + if (err!=BZ_OK) + break; +#endif + } /* end Z_BZIP2ED */ + else + { + ZPOS64_T uTotalOutBefore,uTotalOutAfter; + const Bytef *bufBefore; + ZPOS64_T uOutThis; + int flush=Z_SYNC_FLUSH; + + uTotalOutBefore = pfile_in_zip_read_info->stream.total_out; + bufBefore = pfile_in_zip_read_info->stream.next_out; + + /* + if ((pfile_in_zip_read_info->rest_read_uncompressed == + pfile_in_zip_read_info->stream.avail_out) && + (pfile_in_zip_read_info->rest_read_compressed == 0)) + flush = Z_FINISH; + */ + err=inflate(&pfile_in_zip_read_info->stream,flush); + + if ((err>=0) && (pfile_in_zip_read_info->stream.msg!=NULL)) + err = Z_DATA_ERROR; + + uTotalOutAfter = pfile_in_zip_read_info->stream.total_out; + uOutThis = uTotalOutAfter-uTotalOutBefore; + + pfile_in_zip_read_info->total_out_64 = pfile_in_zip_read_info->total_out_64 + uOutThis; + + pfile_in_zip_read_info->crc32 = + crc32(pfile_in_zip_read_info->crc32,bufBefore, + (uInt)(uOutThis)); + + pfile_in_zip_read_info->rest_read_uncompressed -= + uOutThis; + + iRead += (uInt)(uTotalOutAfter - uTotalOutBefore); + + if (err==Z_STREAM_END) + return (iRead==0) ? UNZ_EOF : iRead; + if (err!=Z_OK) + break; + } + } + + if (err==Z_OK) + return iRead; + return err; +} + + +/* + Give the current position in uncompressed data +*/ +extern z_off_t ZEXPORT unztell (unzFile file) +{ + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + pfile_in_zip_read_info=s->pfile_in_zip_read; + + if (pfile_in_zip_read_info==NULL) + return UNZ_PARAMERROR; + + return (z_off_t)pfile_in_zip_read_info->stream.total_out; +} + +extern ZPOS64_T ZEXPORT unztell64 (unzFile file) +{ + + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + if (file==NULL) + return (ZPOS64_T)-1; + s=(unz64_s*)file; + pfile_in_zip_read_info=s->pfile_in_zip_read; + + if (pfile_in_zip_read_info==NULL) + return (ZPOS64_T)-1; + + return pfile_in_zip_read_info->total_out_64; +} + + +/* + return 1 if the end of file was reached, 0 elsewhere +*/ +extern int ZEXPORT unzeof (unzFile file) +{ + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + pfile_in_zip_read_info=s->pfile_in_zip_read; + + if (pfile_in_zip_read_info==NULL) + return UNZ_PARAMERROR; + + if (pfile_in_zip_read_info->rest_read_uncompressed == 0) + return 1; + else + return 0; +} + + + +/* +Read extra field from the current file (opened by unzOpenCurrentFile) +This is the local-header version of the extra field (sometimes, there is +more info in the local-header version than in the central-header) + + if buf==NULL, it return the size of the local extra field that can be read + + if buf!=NULL, len is the size of the buffer, the extra header is copied in + buf. 
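+  (A common pattern is to call it first with buf==NULL to learn the size,
+  then again with a buffer at least that large.)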
+ the return value is the number of bytes copied in buf, or (if <0) + the error code +*/ +extern int ZEXPORT unzGetLocalExtrafield (unzFile file, voidp buf, unsigned len) +{ + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + uInt read_now; + ZPOS64_T size_to_read; + + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + pfile_in_zip_read_info=s->pfile_in_zip_read; + + if (pfile_in_zip_read_info==NULL) + return UNZ_PARAMERROR; + + size_to_read = (pfile_in_zip_read_info->size_local_extrafield - + pfile_in_zip_read_info->pos_local_extrafield); + + if (buf==NULL) + return (int)size_to_read; + + if (len>size_to_read) + read_now = (uInt)size_to_read; + else + read_now = (uInt)len ; + + if (read_now==0) + return 0; + + if (ZSEEK64(pfile_in_zip_read_info->z_filefunc, + pfile_in_zip_read_info->filestream, + pfile_in_zip_read_info->offset_local_extrafield + + pfile_in_zip_read_info->pos_local_extrafield, + ZLIB_FILEFUNC_SEEK_SET)!=0) + return UNZ_ERRNO; + + if (ZREAD64(pfile_in_zip_read_info->z_filefunc, + pfile_in_zip_read_info->filestream, + buf,read_now)!=read_now) + return UNZ_ERRNO; + + return (int)read_now; +} + +/* + Close the file in zip opened with unzOpenCurrentFile + Return UNZ_CRCERROR if all the file was read but the CRC is not good +*/ +extern int ZEXPORT unzCloseCurrentFile (unzFile file) +{ + int err=UNZ_OK; + + unz64_s* s; + file_in_zip64_read_info_s* pfile_in_zip_read_info; + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + pfile_in_zip_read_info=s->pfile_in_zip_read; + + if (pfile_in_zip_read_info==NULL) + return UNZ_PARAMERROR; + + + if ((pfile_in_zip_read_info->rest_read_uncompressed == 0) && + (!pfile_in_zip_read_info->raw)) + { + if (pfile_in_zip_read_info->crc32 != pfile_in_zip_read_info->crc32_wait) + err=UNZ_CRCERROR; + } + + + TRYFREE(pfile_in_zip_read_info->read_buffer); + pfile_in_zip_read_info->read_buffer = NULL; + if (pfile_in_zip_read_info->stream_initialised == Z_DEFLATED) + inflateEnd(&pfile_in_zip_read_info->stream); +#ifdef HAVE_BZIP2 + else if (pfile_in_zip_read_info->stream_initialised == Z_BZIP2ED) + BZ2_bzDecompressEnd(&pfile_in_zip_read_info->bstream); +#endif + + + pfile_in_zip_read_info->stream_initialised = 0; + TRYFREE(pfile_in_zip_read_info); + + s->pfile_in_zip_read=NULL; + + return err; +} + + +/* + Get the global comment string of the ZipFile, in the szComment buffer. + uSizeBuf is the size of the szComment buffer. 
+ return the number of byte copied or an error code <0 +*/ +extern int ZEXPORT unzGetGlobalComment (unzFile file, char * szComment, uLong uSizeBuf) +{ + unz64_s* s; + uLong uReadThis ; + if (file==NULL) + return (int)UNZ_PARAMERROR; + s=(unz64_s*)file; + + uReadThis = uSizeBuf; + if (uReadThis>s->gi.size_comment) + uReadThis = s->gi.size_comment; + + if (ZSEEK64(s->z_filefunc,s->filestream,s->central_pos+22,ZLIB_FILEFUNC_SEEK_SET)!=0) + return UNZ_ERRNO; + + if (uReadThis>0) + { + *szComment='\0'; + if (ZREAD64(s->z_filefunc,s->filestream,szComment,uReadThis)!=uReadThis) + return UNZ_ERRNO; + } + + if ((szComment != NULL) && (uSizeBuf > s->gi.size_comment)) + *(szComment+s->gi.size_comment)='\0'; + return (int)uReadThis; +} + +/* Additions by RX '2004 */ +extern ZPOS64_T ZEXPORT unzGetOffset64(unzFile file) +{ + unz64_s* s; + + if (file==NULL) + return 0; /* UNZ_PARAMERROR; */ + s=(unz64_s*)file; + if (!s->current_file_ok) + return 0; + if (s->gi.number_entry != 0 && s->gi.number_entry != 0xffff) + if (s->num_file==s->gi.number_entry) + return 0; + return s->pos_in_central_dir; +} + +extern uLong ZEXPORT unzGetOffset (unzFile file) +{ + ZPOS64_T offset64; + + if (file==NULL) + return 0; /* UNZ_PARAMERROR */ + offset64 = unzGetOffset64(file); + return (uLong)offset64; +} + +extern int ZEXPORT unzSetOffset64(unzFile file, ZPOS64_T pos) +{ + unz64_s* s; + int err; + + if (file==NULL) + return UNZ_PARAMERROR; + s=(unz64_s*)file; + + s->pos_in_central_dir = pos; + s->num_file = s->gi.number_entry; /* hack */ + err = unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info, + &s->cur_file_info_internal, + NULL,0,NULL,0,NULL,0); + s->current_file_ok = (err == UNZ_OK); + return err; +} + +extern int ZEXPORT unzSetOffset (unzFile file, uLong pos) +{ + return unzSetOffset64(file,pos); +} diff --git a/src/third_party/minizip/unzip.h b/src/third_party/minizip/unzip.h new file mode 100644 index 000000000..2104e3915 --- /dev/null +++ b/src/third_party/minizip/unzip.h @@ -0,0 +1,437 @@ +/* unzip.h -- IO for uncompress .zip files using zlib + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) + + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications of Unzip for Zip64 + Copyright (C) 2007-2008 Even Rouault + + Modifications for Zip64 support on both zip and unzip + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + + --------------------------------------------------------------------------------- + + Condition of use and distribution are the same than zlib : + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. 
+ + --------------------------------------------------------------------------------- + + Changes + + See header of unzip64.c + +*/ + +#ifndef _unz64_H +#define _unz64_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef _ZLIB_H +#include "zlib.h" +#endif + +#ifndef _ZLIBIOAPI_H +#include "ioapi.h" +#endif + +#ifdef HAVE_BZIP2 +#include "bzlib.h" +#endif + +#define Z_BZIP2ED 12 + +#if defined(STRICTUNZIP) || defined(STRICTZIPUNZIP) +/* like the STRICT of WIN32, we define a pointer that cannot be converted + from (void*) without cast */ +typedef struct TagunzFile__ { int unused; } unzFile__; +typedef unzFile__ *unzFile; +#else +typedef voidp unzFile; +#endif + + +#define UNZ_OK (0) +#define UNZ_END_OF_LIST_OF_FILE (-100) +#define UNZ_ERRNO (Z_ERRNO) +#define UNZ_EOF (0) +#define UNZ_PARAMERROR (-102) +#define UNZ_BADZIPFILE (-103) +#define UNZ_INTERNALERROR (-104) +#define UNZ_CRCERROR (-105) + +/* tm_unz contain date/time info */ +typedef struct tm_unz_s +{ + uInt tm_sec; /* seconds after the minute - [0,59] */ + uInt tm_min; /* minutes after the hour - [0,59] */ + uInt tm_hour; /* hours since midnight - [0,23] */ + uInt tm_mday; /* day of the month - [1,31] */ + uInt tm_mon; /* months since January - [0,11] */ + uInt tm_year; /* years - [1980..2044] */ +} tm_unz; + +/* unz_global_info structure contain global data about the ZIPfile + These data comes from the end of central dir */ +typedef struct unz_global_info64_s +{ + ZPOS64_T number_entry; /* total number of entries in + the central dir on this disk */ + uLong size_comment; /* size of the global comment of the zipfile */ +} unz_global_info64; + +typedef struct unz_global_info_s +{ + uLong number_entry; /* total number of entries in + the central dir on this disk */ + uLong size_comment; /* size of the global comment of the zipfile */ +} unz_global_info; + +/* unz_file_info contain information about a file in the zipfile */ +typedef struct unz_file_info64_s +{ + uLong version; /* version made by 2 bytes */ + uLong version_needed; /* version needed to extract 2 bytes */ + uLong flag; /* general purpose bit flag 2 bytes */ + uLong compression_method; /* compression method 2 bytes */ + uLong dosDate; /* last mod file date in Dos fmt 4 bytes */ + uLong crc; /* crc-32 4 bytes */ + ZPOS64_T compressed_size; /* compressed size 8 bytes */ + ZPOS64_T uncompressed_size; /* uncompressed size 8 bytes */ + uLong size_filename; /* filename length 2 bytes */ + uLong size_file_extra; /* extra field length 2 bytes */ + uLong size_file_comment; /* file comment length 2 bytes */ + + uLong disk_num_start; /* disk number start 2 bytes */ + uLong internal_fa; /* internal file attributes 2 bytes */ + uLong external_fa; /* external file attributes 4 bytes */ + + tm_unz tmu_date; +} unz_file_info64; + +typedef struct unz_file_info_s +{ + uLong version; /* version made by 2 bytes */ + uLong version_needed; /* version needed to extract 2 bytes */ + uLong flag; /* general purpose bit flag 2 bytes */ + uLong compression_method; /* compression method 2 bytes */ + uLong dosDate; /* last mod file date in Dos fmt 4 bytes */ + uLong crc; /* crc-32 4 bytes */ + uLong compressed_size; /* compressed size 4 bytes */ + uLong uncompressed_size; /* uncompressed size 4 bytes */ + uLong size_filename; /* filename length 2 bytes */ + uLong size_file_extra; /* extra field length 2 bytes */ + uLong size_file_comment; /* file comment length 2 bytes */ + + uLong disk_num_start; /* disk number start 2 bytes */ + uLong internal_fa; /* internal file attributes 2 bytes */ + uLong 
external_fa; /* external file attributes 4 bytes */ + + tm_unz tmu_date; +} unz_file_info; + +extern int ZEXPORT unzStringFileNameCompare OF ((const char* fileName1, + const char* fileName2, + int iCaseSensitivity)); +/* + Compare two filename (fileName1,fileName2). + If iCaseSenisivity = 1, comparision is case sensitivity (like strcmp) + If iCaseSenisivity = 2, comparision is not case sensitivity (like strcmpi + or strcasecmp) + If iCaseSenisivity = 0, case sensitivity is defaut of your operating system + (like 1 on Unix, 2 on Windows) +*/ + + +extern unzFile ZEXPORT unzOpen OF((const char *path)); +extern unzFile ZEXPORT unzOpen64 OF((const void *path)); +/* + Open a Zip file. path contain the full pathname (by example, + on a Windows XP computer "c:\\zlib\\zlib113.zip" or on an Unix computer + "zlib/zlib113.zip". + If the zipfile cannot be opened (file don't exist or in not valid), the + return value is NULL. + Else, the return value is a unzFile Handle, usable with other function + of this unzip package. + the "64" function take a const void* pointer, because the path is just the + value passed to the open64_file_func callback. + Under Windows, if UNICODE is defined, using fill_fopen64_filefunc, the path + is a pointer to a wide unicode string (LPCTSTR is LPCWSTR), so const char* + does not describe the reality +*/ + + +extern unzFile ZEXPORT unzOpen2 OF((const char *path, + zlib_filefunc_def* pzlib_filefunc_def)); +/* + Open a Zip file, like unzOpen, but provide a set of file low level API + for read/write the zip file (see ioapi.h) +*/ + +extern unzFile ZEXPORT unzOpen2_64 OF((const void *path, + zlib_filefunc64_def* pzlib_filefunc_def)); +/* + Open a Zip file, like unz64Open, but provide a set of file low level API + for read/write the zip file (see ioapi.h) +*/ + +extern int ZEXPORT unzClose OF((unzFile file)); +/* + Close a ZipFile opened with unzOpen. + If there is files inside the .Zip opened with unzOpenCurrentFile (see later), + these files MUST be closed with unzCloseCurrentFile before call unzClose. + return UNZ_OK if there is no problem. */ + +extern int ZEXPORT unzGetGlobalInfo OF((unzFile file, + unz_global_info *pglobal_info)); + +extern int ZEXPORT unzGetGlobalInfo64 OF((unzFile file, + unz_global_info64 *pglobal_info)); +/* + Write info about the ZipFile in the *pglobal_info structure. + No preparation of the structure is needed + return UNZ_OK if there is no problem. */ + + +extern int ZEXPORT unzGetGlobalComment OF((unzFile file, + char *szComment, + uLong uSizeBuf)); +/* + Get the global comment string of the ZipFile, in the szComment buffer. + uSizeBuf is the size of the szComment buffer. + return the number of byte copied or an error code <0 +*/ + + +/***************************************************************************/ +/* Unzip package allow you browse the directory of the zipfile */ + +extern int ZEXPORT unzGoToFirstFile OF((unzFile file)); +/* + Set the current file of the zipfile to the first file. + return UNZ_OK if there is no problem +*/ + +extern int ZEXPORT unzGoToNextFile OF((unzFile file)); +/* + Set the current file of the zipfile to the next file. + return UNZ_OK if there is no problem + return UNZ_END_OF_LIST_OF_FILE if the actual file was the latest. +*/ + +extern int ZEXPORT unzLocateFile OF((unzFile file, + const char *szFileName, + int iCaseSensitivity)); +/* + Try locate the file szFileName in the zipfile. + For the iCaseSensitivity signification, see unzStringFileNameCompare + + return value : + UNZ_OK if the file is found. 
It becomes the current file. + UNZ_END_OF_LIST_OF_FILE if the file is not found +*/ + + +/* ****************************************** */ +/* Ryan supplied functions */ +/* unz_file_info contain information about a file in the zipfile */ +typedef struct unz_file_pos_s +{ + uLong pos_in_zip_directory; /* offset in zip file directory */ + uLong num_of_file; /* # of file */ +} unz_file_pos; + +extern int ZEXPORT unzGetFilePos( + unzFile file, + unz_file_pos* file_pos); + +extern int ZEXPORT unzGoToFilePos( + unzFile file, + unz_file_pos* file_pos); + +typedef struct unz64_file_pos_s +{ + ZPOS64_T pos_in_zip_directory; /* offset in zip file directory */ + ZPOS64_T num_of_file; /* # of file */ +} unz64_file_pos; + +extern int ZEXPORT unzGetFilePos64( + unzFile file, + unz64_file_pos* file_pos); + +extern int ZEXPORT unzGoToFilePos64( + unzFile file, + const unz64_file_pos* file_pos); + +/* ****************************************** */ + +extern int ZEXPORT unzGetCurrentFileInfo64 OF((unzFile file, + unz_file_info64 *pfile_info, + char *szFileName, + uLong fileNameBufferSize, + void *extraField, + uLong extraFieldBufferSize, + char *szComment, + uLong commentBufferSize)); + +extern int ZEXPORT unzGetCurrentFileInfo OF((unzFile file, + unz_file_info *pfile_info, + char *szFileName, + uLong fileNameBufferSize, + void *extraField, + uLong extraFieldBufferSize, + char *szComment, + uLong commentBufferSize)); +/* + Get Info about the current file + if pfile_info!=NULL, the *pfile_info structure will contain somes info about + the current file + if szFileName!=NULL, the filemane string will be copied in szFileName + (fileNameBufferSize is the size of the buffer) + if extraField!=NULL, the extra field information will be copied in extraField + (extraFieldBufferSize is the size of the buffer). + This is the Central-header version of the extra field + if szComment!=NULL, the comment string of the file will be copied in szComment + (commentBufferSize is the size of the buffer) +*/ + + +/** Addition for GDAL : START */ + +extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64 OF((unzFile file)); + +/** Addition for GDAL : END */ + + +/***************************************************************************/ +/* for reading the content of the current zipfile, you can open it, read data + from it, and close it (you can close it before reading all the file) + */ + +extern int ZEXPORT unzOpenCurrentFile OF((unzFile file)); +/* + Open for reading data the current file in the zipfile. + If there is no error, the return value is UNZ_OK. +*/ + +extern int ZEXPORT unzOpenCurrentFilePassword OF((unzFile file, + const char* password)); +/* + Open for reading data the current file in the zipfile. + password is a crypting password + If there is no error, the return value is UNZ_OK. 
+*/ + +extern int ZEXPORT unzOpenCurrentFile2 OF((unzFile file, + int* method, + int* level, + int raw)); +/* + Same than unzOpenCurrentFile, but open for read raw the file (not uncompress) + if raw==1 + *method will receive method of compression, *level will receive level of + compression + note : you can set level parameter as NULL (if you did not want known level, + but you CANNOT set method parameter as NULL +*/ + +extern int ZEXPORT unzOpenCurrentFile3 OF((unzFile file, + int* method, + int* level, + int raw, + const char* password)); +/* + Same than unzOpenCurrentFile, but open for read raw the file (not uncompress) + if raw==1 + *method will receive method of compression, *level will receive level of + compression + note : you can set level parameter as NULL (if you did not want known level, + but you CANNOT set method parameter as NULL +*/ + + +extern int ZEXPORT unzCloseCurrentFile OF((unzFile file)); +/* + Close the file in zip opened with unzOpenCurrentFile + Return UNZ_CRCERROR if all the file was read but the CRC is not good +*/ + +extern int ZEXPORT unzReadCurrentFile OF((unzFile file, + voidp buf, + unsigned len)); +/* + Read bytes from the current file (opened by unzOpenCurrentFile) + buf contain buffer where data must be copied + len the size of buf. + + return the number of byte copied if somes bytes are copied + return 0 if the end of file was reached + return <0 with error code if there is an error + (UNZ_ERRNO for IO error, or zLib error for uncompress error) +*/ + +extern z_off_t ZEXPORT unztell OF((unzFile file)); + +extern ZPOS64_T ZEXPORT unztell64 OF((unzFile file)); +/* + Give the current position in uncompressed data +*/ + +extern int ZEXPORT unzeof OF((unzFile file)); +/* + return 1 if the end of file was reached, 0 elsewhere +*/ + +extern int ZEXPORT unzGetLocalExtrafield OF((unzFile file, + voidp buf, + unsigned len)); +/* + Read extra field from the current file (opened by unzOpenCurrentFile) + This is the local-header version of the extra field (sometimes, there is + more info in the local-header version than in the central-header) + + if buf==NULL, it return the size of the local extra field + + if buf!=NULL, len is the size of the buffer, the extra header is copied in + buf. 
+  the return value is the number of bytes copied in buf, or (if <0)
+  the error code
+*/
+
+/***************************************************************************/
+
+/* Get the current file offset */
+extern ZPOS64_T ZEXPORT unzGetOffset64 (unzFile file);
+extern uLong ZEXPORT unzGetOffset (unzFile file);
+
+/* Set the current file offset */
+extern int ZEXPORT unzSetOffset64 (unzFile file, ZPOS64_T pos);
+extern int ZEXPORT unzSetOffset (unzFile file, uLong pos);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _unz64_H */
diff --git a/src/third_party/minizip/zip.c b/src/third_party/minizip/zip.c
new file mode 100644
index 000000000..a5753f4fe
--- /dev/null
+++ b/src/third_party/minizip/zip.c
@@ -0,0 +1,2005 @@
+/* zip.c -- IO on .zip files using zlib
+   Version 1.1, February 14h, 2010
+   part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html )
+
+   Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html )
+
+   Modifications for Zip64 support
+   Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com )
+
+   For more info read MiniZip_info.txt
+
+   Changes
+   Oct-2009 - Mathias Svensson - Remove old C style function prototypes
+   Oct-2009 - Mathias Svensson - Added Zip64 Support when creating new file archives
+   Oct-2009 - Mathias Svensson - Did some code cleanup and refactoring to get better overview of some functions.
+   Oct-2009 - Mathias Svensson - Added zipRemoveExtraInfoBlock to strip extra field data from its ZIP64 data
+                                 It is used when recreting zip archive with RAW when deleting items from a zip.
+                                 ZIP64 data is automatically added to items that needs it, and existing ZIP64 data need to be removed.
+   Oct-2009 - Mathias Svensson - Added support for BZIP2 as compression mode (bzip2 lib is required)
+   Jan-2010 - back to unzip and minizip 1.0 name scheme, with compatibility layer
+
+*/
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "zlib.h"
+#include "zip.h"
+
+#ifdef STDC
+# include <stddef.h>
+# include <string.h>
+# include <stdlib.h>
+#endif
+#ifdef NO_ERRNO_H
+    extern int errno;
+#else
+#   include <errno.h>
+#endif
+
+#ifndef NOCRYPT
+#define INCLUDECRYPTINGCODE_IFCRYPTALLOWED
+#include "crypt.h"
+#endif
+
+#ifndef local
+# define local static
+#endif
+/* compile with -Dlocal if your debugger can't find static symbols */
+
+#ifndef VERSIONMADEBY
+# define VERSIONMADEBY   (0x0) /* platform depedent */
+#endif
+
+#ifndef Z_BUFSIZE
+#define Z_BUFSIZE (64*1024) /* (16384) */
+#endif
+
+#ifndef Z_MAXFILENAMEINZIP
+#define Z_MAXFILENAMEINZIP (256)
+#endif
+
+#ifndef ALLOC
+# define ALLOC(size) (malloc(size))
+#endif
+#ifndef TRYFREE
+# define TRYFREE(p) {if (p) free(p);}
+#endif
+
+/*
+#define SIZECENTRALDIRITEM (0x2e)
+#define SIZEZIPLOCALHEADER (0x1e)
+*/
+
+/* I've found an old Unix (a SunOS 4.1.3_U1) without all SEEK_* defined....
*/ + + +/* NOT sure that this work on ALL platform */ +#define MAKEULONG64(a, b) ((ZPOS64_T)(((unsigned long)(a)) | ((ZPOS64_T)((unsigned long)(b))) << 32)) + +#ifndef SEEK_CUR +#define SEEK_CUR 1 +#endif + +#ifndef SEEK_END +#define SEEK_END 2 +#endif + +#ifndef SEEK_SET +#define SEEK_SET 0 +#endif + +#ifndef DEF_MEM_LEVEL +#if MAX_MEM_LEVEL >= 8 +# define DEF_MEM_LEVEL 8 +#else +# define DEF_MEM_LEVEL MAX_MEM_LEVEL +#endif +#endif +const char zip_copyright[] =" zip 1.01 Copyright 1998-2004 Gilles Vollant - http://www.winimage.com/zLibDll"; + + +#define SIZEDATA_INDATABLOCK (4096-(4*4)) + +#define LOCALHEADERMAGIC (0x04034b50) +#define CENTRALHEADERMAGIC (0x02014b50) +#define ENDHEADERMAGIC (0x06054b50) +#define ZIP64ENDHEADERMAGIC (0x6064b50) +#define ZIP64ENDLOCHEADERMAGIC (0x7064b50) + +#define FLAG_LOCALHEADER_OFFSET (0x06) +#define CRC_LOCALHEADER_OFFSET (0x0e) + +#define SIZECENTRALHEADER (0x2e) /* 46 */ + +typedef struct linkedlist_datablock_internal_s +{ + struct linkedlist_datablock_internal_s* next_datablock; + uLong avail_in_this_block; + uLong filled_in_this_block; + uLong unused; /* for future use and alignment */ + unsigned char data[SIZEDATA_INDATABLOCK]; +} linkedlist_datablock_internal; + +typedef struct linkedlist_data_s +{ + linkedlist_datablock_internal* first_block; + linkedlist_datablock_internal* last_block; +} linkedlist_data; + + +typedef struct +{ + z_stream stream; /* zLib stream structure for inflate */ +#ifdef HAVE_BZIP2 + bz_stream bstream; /* bzLib stream structure for bziped */ +#endif + + int stream_initialised; /* 1 is stream is initialised */ + uInt pos_in_buffered_data; /* last written byte in buffered_data */ + + ZPOS64_T pos_local_header; /* offset of the local header of the file + currenty writing */ + char* central_header; /* central header data for the current file */ + uLong size_centralExtra; + uLong size_centralheader; /* size of the central header for cur file */ + uLong size_centralExtraFree; /* Extra bytes allocated to the centralheader but that are not used */ + uLong flag; /* flag of the file currently writing */ + + int method; /* compression method of file currenty wr.*/ + int raw; /* 1 for directly writing raw data */ + Byte buffered_data[Z_BUFSIZE];/* buffer contain compressed data to be writ*/ + uLong dosDate; + uLong crc32; + int encrypt; + int zip64; /* Add ZIP64 extened information in the extra field */ + ZPOS64_T pos_zip64extrainfo; + ZPOS64_T totalCompressedData; + ZPOS64_T totalUncompressedData; +#ifndef NOCRYPT + unsigned long keys[3]; /* keys defining the pseudo-random sequence */ + const z_crc_t* pcrc_32_tab; + int crypt_header_size; +#endif +} curfile64_info; + +typedef struct +{ + zlib_filefunc64_32_def z_filefunc; + voidpf filestream; /* io structore of the zipfile */ + linkedlist_data central_dir;/* datablock with central dir in construction*/ + int in_opened_file_inzip; /* 1 if a file in the zip is currently writ.*/ + curfile64_info ci; /* info on the file curretly writing */ + + ZPOS64_T begin_pos; /* position of the beginning of the zipfile */ + ZPOS64_T add_position_when_writing_offset; + ZPOS64_T number_entry; + +#ifndef NO_ADDFILEINEXISTINGZIP + char *globalcomment; +#endif + +} zip64_internal; + +local linkedlist_datablock_internal* allocate_new_datablock() +{ + linkedlist_datablock_internal* ldi; + ldi = (linkedlist_datablock_internal*) + ALLOC(sizeof(linkedlist_datablock_internal)); + if (ldi!=NULL) + { + ldi->next_datablock = NULL ; + ldi->filled_in_this_block = 0 ; + ldi->avail_in_this_block = 
SIZEDATA_INDATABLOCK ;
+    }
+    return ldi;
+}
+
+local void free_datablock(linkedlist_datablock_internal* ldi)
+{
+    while (ldi!=NULL)
+    {
+        linkedlist_datablock_internal* ldinext = ldi->next_datablock;
+        TRYFREE(ldi);
+        ldi = ldinext;
+    }
+}
+
+local void init_linkedlist(linkedlist_data* ll)
+{
+    ll->first_block = ll->last_block = NULL;
+}
+
+local void free_linkedlist(linkedlist_data* ll)
+{
+    free_datablock(ll->first_block);
+    ll->first_block = ll->last_block = NULL;
+}
+
+
+local int add_data_in_datablock(linkedlist_data* ll, const void* buf, uLong len)
+{
+    linkedlist_datablock_internal* ldi;
+    const unsigned char* from_copy;
+
+    if (ll==NULL)
+        return ZIP_INTERNALERROR;
+
+    if (ll->last_block == NULL)
+    {
+        ll->first_block = ll->last_block = allocate_new_datablock();
+        if (ll->first_block == NULL)
+            return ZIP_INTERNALERROR;
+    }
+
+    ldi = ll->last_block;
+    from_copy = (unsigned char*)buf;
+
+    while (len>0)
+    {
+        uInt copy_this;
+        uInt i;
+        unsigned char* to_copy;
+
+        if (ldi->avail_in_this_block==0)
+        {
+            ldi->next_datablock = allocate_new_datablock();
+            if (ldi->next_datablock == NULL)
+                return ZIP_INTERNALERROR;
+            ldi = ldi->next_datablock ;
+            ll->last_block = ldi;
+        }
+
+        if (ldi->avail_in_this_block < len)
+            copy_this = (uInt)ldi->avail_in_this_block;
+        else
+            copy_this = (uInt)len;
+
+        to_copy = &(ldi->data[ldi->filled_in_this_block]);
+
+        for (i=0;i<copy_this;i++)
+            *(to_copy+i)=*(from_copy+i);
+
+        ldi->filled_in_this_block += copy_this;
+        ldi->avail_in_this_block -= copy_this;
+        from_copy += copy_this ;
+        len -= copy_this;
+    }
+    return ZIP_OK;
+}
+
+
+
+/****************************************************************************/
+
+#ifndef NO_ADDFILEINEXISTINGZIP
+/* ===========================================================================
+   Inputs a long in LSB order to the given file
+   nbByte == 1, 2 ,4 or 8 (byte, short or long, ZPOS64_T)
+*/
+
+local int zip64local_putValue OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T x, int nbByte));
+local int zip64local_putValue (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T x, int nbByte)
+{
+    unsigned char buf[8];
+    int n;
+    for (n = 0; n < nbByte; n++)
+    {
+        buf[n] = (unsigned char)(x & 0xff);
+        x >>= 8;
+    }
+    if (x != 0)
+    {     /* data overflow - hack for ZIP64 (X Roche) */
+      for (n = 0; n < nbByte; n++)
+      {
+        buf[n] = 0xff;
+      }
+    }
+
+    if (ZWRITE64(*pzlib_filefunc_def,filestream,buf,nbByte)!=(uLong)nbByte)
+        return ZIP_ERRNO;
+    else
+        return ZIP_OK;
+}
+
+local void zip64local_putValue_inmemory OF((void* dest, ZPOS64_T x, int nbByte));
+local void zip64local_putValue_inmemory (void* dest, ZPOS64_T x, int nbByte)
+{
+    unsigned char* buf=(unsigned char*)dest;
+    int n;
+    for (n = 0; n < nbByte; n++) {
+        buf[n] = (unsigned char)(x & 0xff);
+        x >>= 8;
+    }
+
+    if (x != 0)
+    {     /* data overflow - hack for ZIP64 */
+       for (n = 0; n < nbByte; n++)
+       {
+          buf[n] = 0xff;
+       }
+    }
+}
+
+/****************************************************************************/
+
+
+local uLong zip64local_TmzDateToDosDate(const tm_zip* ptm)
+{
+    uLong year = (uLong)ptm->tm_year;
+    if (year>=1980)
+        year-=1980;
+    else if (year>=80)
+        year-=80;
+    return
+      (uLong) (((ptm->tm_mday) + (32 * (ptm->tm_mon+1)) + (512 * year)) << 16) |
+        ((ptm->tm_sec/2) + (32* ptm->tm_min) + (2048 * (uLong)ptm->tm_hour));
+}
+
+
+/****************************************************************************/
+
+local int zip64local_getByte OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, int *pi));
+
+local int zip64local_getByte(const
zlib_filefunc64_32_def* pzlib_filefunc_def,voidpf filestream,int* pi) +{ + unsigned char c; + int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,&c,1); + if (err==1) + { + *pi = (int)c; + return ZIP_OK; + } + else + { + if (ZERROR64(*pzlib_filefunc_def,filestream)) + return ZIP_ERRNO; + else + return ZIP_EOF; + } +} + + +/* =========================================================================== + Reads a long in LSB order from the given gz_stream. Sets +*/ +local int zip64local_getShort OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong *pX)); + +local int zip64local_getShort (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong* pX) +{ + uLong x ; + int i = 0; + int err; + + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x = (uLong)i; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((uLong)i)<<8; + + if (err==ZIP_OK) + *pX = x; + else + *pX = 0; + return err; +} + +local int zip64local_getLong OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong *pX)); + +local int zip64local_getLong (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong* pX) +{ + uLong x ; + int i = 0; + int err; + + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x = (uLong)i; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((uLong)i)<<8; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((uLong)i)<<16; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((uLong)i)<<24; + + if (err==ZIP_OK) + *pX = x; + else + *pX = 0; + return err; +} + +local int zip64local_getLong64 OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T *pX)); + + +local int zip64local_getLong64 (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T *pX) +{ + ZPOS64_T x; + int i = 0; + int err; + + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x = (ZPOS64_T)i; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<8; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<16; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<24; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<32; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<40; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<48; + + if (err==ZIP_OK) + err = zip64local_getByte(pzlib_filefunc_def,filestream,&i); + x += ((ZPOS64_T)i)<<56; + + if (err==ZIP_OK) + *pX = x; + else + *pX = 0; + + return err; +} + +#ifndef BUFREADCOMMENT +#define BUFREADCOMMENT (0x400) +#endif +/* + Locate the Central directory of a zipfile (at the end, just before + the global comment) +*/ +local ZPOS64_T zip64local_SearchCentralDir OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); + +local ZPOS64_T zip64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) +{ + unsigned char* buf; + ZPOS64_T uSizeFile; + ZPOS64_T uBackRead; + ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */ + ZPOS64_T uPosFound=0; + + if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 
0)
+    return 0;
+
+
+  uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream);
+
+  if (uMaxBack>uSizeFile)
+    uMaxBack = uSizeFile;
+
+  buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4);
+  if (buf==NULL)
+    return 0;
+
+  uBackRead = 4;
+  while (uBackRead<uMaxBack)
+  {
+    uLong uReadSize;
+    ZPOS64_T uReadPos ;
+    int i;
+    if (uBackRead+BUFREADCOMMENT>uMaxBack)
+      uBackRead = uMaxBack;
+    else
+      uBackRead+=BUFREADCOMMENT;
+    uReadPos = uSizeFile-uBackRead ;
+
+    uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ?
+      (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos);
+    if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0)
+      break;
+
+    if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize)
+      break;
+
+    for (i=(int)uReadSize-3; (i--)>0;)
+      if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) &&
+        ((*(buf+i+2))==0x05) && ((*(buf+i+3))==0x06))
+      {
+        uPosFound = uReadPos+i;
+        break;
+      }
+
+    if (uPosFound!=0)
+      break;
+  }
+  TRYFREE(buf);
+  return uPosFound;
+}
+
+/*
+Locate the End of Zip64 Central directory locator and from there find the CD of a zipfile (at the end, just before
+the global comment)
+*/
+local ZPOS64_T zip64local_SearchCentralDir64 OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream));
+
+local ZPOS64_T zip64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)
+{
+  unsigned char* buf;
+  ZPOS64_T uSizeFile;
+  ZPOS64_T uBackRead;
+  ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */
+  ZPOS64_T uPosFound=0;
+  uLong uL;
+  ZPOS64_T relativeOffset;
+
+  if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0)
+    return 0;
+
+  uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream);
+
+  if (uMaxBack>uSizeFile)
+    uMaxBack = uSizeFile;
+
+  buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4);
+  if (buf==NULL)
+    return 0;
+
+  uBackRead = 4;
+  while (uBackRead<uMaxBack)
+  {
+    uLong uReadSize;
+    ZPOS64_T uReadPos;
+    int i;
+    if (uBackRead+BUFREADCOMMENT>uMaxBack)
+      uBackRead = uMaxBack;
+    else
+      uBackRead+=BUFREADCOMMENT;
+    uReadPos = uSizeFile-uBackRead ;
+
+    uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ?
+ (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos); + if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) + break; + + if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) + break; + + for (i=(int)uReadSize-3; (i--)>0;) + { + /* Signature "0x07064b50" Zip64 end of central directory locater */ + if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && ((*(buf+i+2))==0x06) && ((*(buf+i+3))==0x07)) + { + uPosFound = uReadPos+i; + break; + } + } + + if (uPosFound!=0) + break; + } + + TRYFREE(buf); + if (uPosFound == 0) + return 0; + + /* Zip64 end of central directory locator */ + if (ZSEEK64(*pzlib_filefunc_def,filestream, uPosFound,ZLIB_FILEFUNC_SEEK_SET)!=0) + return 0; + + /* the signature, already checked */ + if (zip64local_getLong(pzlib_filefunc_def,filestream,&uL)!=ZIP_OK) + return 0; + + /* number of the disk with the start of the zip64 end of central directory */ + if (zip64local_getLong(pzlib_filefunc_def,filestream,&uL)!=ZIP_OK) + return 0; + if (uL != 0) + return 0; + + /* relative offset of the zip64 end of central directory record */ + if (zip64local_getLong64(pzlib_filefunc_def,filestream,&relativeOffset)!=ZIP_OK) + return 0; + + /* total number of disks */ + if (zip64local_getLong(pzlib_filefunc_def,filestream,&uL)!=ZIP_OK) + return 0; + if (uL != 1) + return 0; + + /* Goto Zip64 end of central directory record */ + if (ZSEEK64(*pzlib_filefunc_def,filestream, relativeOffset,ZLIB_FILEFUNC_SEEK_SET)!=0) + return 0; + + /* the signature */ + if (zip64local_getLong(pzlib_filefunc_def,filestream,&uL)!=ZIP_OK) + return 0; + + if (uL != 0x06064b50) /* signature of 'Zip64 end of central directory' */ + return 0; + + return relativeOffset; +} + +int LoadCentralDirectoryRecord(zip64_internal* pziinit) +{ + int err=ZIP_OK; + ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ + + ZPOS64_T size_central_dir; /* size of the central directory */ + ZPOS64_T offset_central_dir; /* offset of start of central directory */ + ZPOS64_T central_pos; + uLong uL; + + uLong number_disk; /* number of the current dist, used for + spaning ZIP, unsupported, always 0*/ + uLong number_disk_with_CD; /* number the the disk with central dir, used + for spaning ZIP, unsupported, always 0*/ + ZPOS64_T number_entry; + ZPOS64_T number_entry_CD; /* total number of entries in + the central dir + (same than number_entry on nospan) */ + uLong VersionMadeBy; + uLong VersionNeeded; + uLong size_comment; + + int hasZIP64Record = 0; + + /* check first if we find a ZIP64 record */ + central_pos = zip64local_SearchCentralDir64(&pziinit->z_filefunc,pziinit->filestream); + if(central_pos > 0) + { + hasZIP64Record = 1; + } + else if(central_pos == 0) + { + central_pos = zip64local_SearchCentralDir(&pziinit->z_filefunc,pziinit->filestream); + } + +/* disable to allow appending to empty ZIP archive + if (central_pos==0) + err=ZIP_ERRNO; +*/ + + if(hasZIP64Record) + { + ZPOS64_T sizeEndOfCentralDirectory; + if (ZSEEK64(pziinit->z_filefunc, pziinit->filestream, central_pos, ZLIB_FILEFUNC_SEEK_SET) != 0) + err=ZIP_ERRNO; + + /* the signature, already checked */ + if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream,&uL)!=ZIP_OK) + err=ZIP_ERRNO; + + /* size of zip64 end of central directory record */ + if (zip64local_getLong64(&pziinit->z_filefunc, pziinit->filestream, &sizeEndOfCentralDirectory)!=ZIP_OK) + err=ZIP_ERRNO; + + /* version made by */ + if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &VersionMadeBy)!=ZIP_OK) + err=ZIP_ERRNO; + + 
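+    /* The remaining fields of the Zip64 end-of-central-directory record are
+       read below: version needed, the two disk numbers, the 64-bit entry
+       counts, and the 64-bit size and offset of the central directory.  The
+       classic 2/4-byte variants of the same fields are parsed in the else
+       branch further down. */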
/* version needed to extract */
+    if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &VersionNeeded)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    /* number of this disk */
+    if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream,&number_disk)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    /* number of the disk with the start of the central directory */
+    if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream,&number_disk_with_CD)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    /* total number of entries in the central directory on this disk */
+    if (zip64local_getLong64(&pziinit->z_filefunc, pziinit->filestream, &number_entry)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    /* total number of entries in the central directory */
+    if (zip64local_getLong64(&pziinit->z_filefunc, pziinit->filestream,&number_entry_CD)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    if ((number_entry_CD!=number_entry) || (number_disk_with_CD!=0) || (number_disk!=0))
+      err=ZIP_BADZIPFILE;
+
+    /* size of the central directory */
+    if (zip64local_getLong64(&pziinit->z_filefunc, pziinit->filestream,&size_central_dir)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    /* offset of start of central directory with respect to the
+       starting disk number */
+    if (zip64local_getLong64(&pziinit->z_filefunc, pziinit->filestream,&offset_central_dir)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    /* TODO..
+       read the comment from the standard central header. */
+    size_comment = 0;
+  }
+  else
+  {
+    /* Read End of central Directory info */
+    if (ZSEEK64(pziinit->z_filefunc, pziinit->filestream, central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0)
+      err=ZIP_ERRNO;
+
+    /* the signature, already checked */
+    if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream,&uL)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    /* number of this disk */
+    if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream,&number_disk)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    /* number of the disk with the start of the central directory */
+    if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream,&number_disk_with_CD)!=ZIP_OK)
+      err=ZIP_ERRNO;
+
+    /* total number of entries in the central dir on this disk */
+    number_entry = 0;
+    if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &uL)!=ZIP_OK)
+      err=ZIP_ERRNO;
+    else
+      number_entry = uL;
+
+    /* total number of entries in the central dir */
+    number_entry_CD = 0;
+    if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &uL)!=ZIP_OK)
+      err=ZIP_ERRNO;
+    else
+      number_entry_CD = uL;
+
+    if ((number_entry_CD!=number_entry) || (number_disk_with_CD!=0) || (number_disk!=0))
+      err=ZIP_BADZIPFILE;
+
+    /* size of the central directory */
+    size_central_dir = 0;
+    if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream, &uL)!=ZIP_OK)
+      err=ZIP_ERRNO;
+    else
+      size_central_dir = uL;
+
+    /* offset of start of central directory with respect to the starting disk number */
+    offset_central_dir = 0;
+    if (zip64local_getLong(&pziinit->z_filefunc, pziinit->filestream, &uL)!=ZIP_OK)
+      err=ZIP_ERRNO;
+    else
+      offset_central_dir = uL;
+
+
+    /* zipfile global comment length */
+    if (zip64local_getShort(&pziinit->z_filefunc, pziinit->filestream, &size_comment)!=ZIP_OK)
+      err=ZIP_ERRNO;
+  }
+
+  if ((central_pos<offset_central_dir+size_central_dir) &&
+    (err==ZIP_OK))
+    err=ZIP_BADZIPFILE;
+
+  if (err!=ZIP_OK)
+  {
+    ZCLOSE64(pziinit->z_filefunc, pziinit->filestream);
+    return ZIP_ERRNO;
+  }
+
+  if (size_comment>0)
+  {
+    pziinit->globalcomment = (char*)ALLOC(size_comment+1);
+    if (pziinit->globalcomment)
+    {
+      size_comment = ZREAD64(pziinit->z_filefunc, pziinit->filestream, pziinit->globalcomment,size_comment);
+      pziinit->globalcomment[size_comment]=0;
+    }
+  }
+
+  byte_before_the_zipfile = central_pos -
(offset_central_dir+size_central_dir); + pziinit->add_position_when_writing_offset = byte_before_the_zipfile; + + { + ZPOS64_T size_central_dir_to_read = size_central_dir; + size_t buf_size = SIZEDATA_INDATABLOCK; + void* buf_read = (void*)ALLOC(buf_size); + if (ZSEEK64(pziinit->z_filefunc, pziinit->filestream, offset_central_dir + byte_before_the_zipfile, ZLIB_FILEFUNC_SEEK_SET) != 0) + err=ZIP_ERRNO; + + while ((size_central_dir_to_read>0) && (err==ZIP_OK)) + { + ZPOS64_T read_this = SIZEDATA_INDATABLOCK; + if (read_this > size_central_dir_to_read) + read_this = size_central_dir_to_read; + + if (ZREAD64(pziinit->z_filefunc, pziinit->filestream,buf_read,(uLong)read_this) != read_this) + err=ZIP_ERRNO; + + if (err==ZIP_OK) + err = add_data_in_datablock(&pziinit->central_dir,buf_read, (uLong)read_this); + + size_central_dir_to_read-=read_this; + } + TRYFREE(buf_read); + } + pziinit->begin_pos = byte_before_the_zipfile; + pziinit->number_entry = number_entry_CD; + + if (ZSEEK64(pziinit->z_filefunc, pziinit->filestream, offset_central_dir+byte_before_the_zipfile,ZLIB_FILEFUNC_SEEK_SET) != 0) + err=ZIP_ERRNO; + + return err; +} + + +#endif /* !NO_ADDFILEINEXISTINGZIP*/ + + +/************************************************************/ +extern zipFile ZEXPORT zipOpen3 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_32_def* pzlib_filefunc64_32_def) +{ + zip64_internal ziinit; + zip64_internal* zi; + int err=ZIP_OK; + + ziinit.z_filefunc.zseek32_file = NULL; + ziinit.z_filefunc.ztell32_file = NULL; + if (pzlib_filefunc64_32_def==NULL) + fill_fopen64_filefunc(&ziinit.z_filefunc.zfile_func64); + else + ziinit.z_filefunc = *pzlib_filefunc64_32_def; + + ziinit.filestream = ZOPEN64(ziinit.z_filefunc, + pathname, + (append == APPEND_STATUS_CREATE) ? 
+ (ZLIB_FILEFUNC_MODE_READ | ZLIB_FILEFUNC_MODE_WRITE | ZLIB_FILEFUNC_MODE_CREATE) : + (ZLIB_FILEFUNC_MODE_READ | ZLIB_FILEFUNC_MODE_WRITE | ZLIB_FILEFUNC_MODE_EXISTING)); + + if (ziinit.filestream == NULL) + return NULL; + + if (append == APPEND_STATUS_CREATEAFTER) + ZSEEK64(ziinit.z_filefunc,ziinit.filestream,0,SEEK_END); + + ziinit.begin_pos = ZTELL64(ziinit.z_filefunc,ziinit.filestream); + ziinit.in_opened_file_inzip = 0; + ziinit.ci.stream_initialised = 0; + ziinit.number_entry = 0; + ziinit.add_position_when_writing_offset = 0; + init_linkedlist(&(ziinit.central_dir)); + + + + zi = (zip64_internal*)ALLOC(sizeof(zip64_internal)); + if (zi==NULL) + { + ZCLOSE64(ziinit.z_filefunc,ziinit.filestream); + return NULL; + } + + /* now we add file in a zipfile */ +# ifndef NO_ADDFILEINEXISTINGZIP + ziinit.globalcomment = NULL; + if (append == APPEND_STATUS_ADDINZIP) + { + /* Read and Cache Central Directory Records */ + err = LoadCentralDirectoryRecord(&ziinit); + } + + if (globalcomment) + { + *globalcomment = ziinit.globalcomment; + } +# endif /* !NO_ADDFILEINEXISTINGZIP*/ + + if (err != ZIP_OK) + { +# ifndef NO_ADDFILEINEXISTINGZIP + TRYFREE(ziinit.globalcomment); +# endif /* !NO_ADDFILEINEXISTINGZIP*/ + TRYFREE(zi); + return NULL; + } + else + { + *zi = ziinit; + return (zipFile)zi; + } +} + +extern zipFile ZEXPORT zipOpen2 (const char *pathname, int append, zipcharpc* globalcomment, zlib_filefunc_def* pzlib_filefunc32_def) +{ + if (pzlib_filefunc32_def != NULL) + { + zlib_filefunc64_32_def zlib_filefunc64_32_def_fill; + fill_zlib_filefunc64_32_def_from_filefunc32(&zlib_filefunc64_32_def_fill,pzlib_filefunc32_def); + return zipOpen3(pathname, append, globalcomment, &zlib_filefunc64_32_def_fill); + } + else + return zipOpen3(pathname, append, globalcomment, NULL); +} + +extern zipFile ZEXPORT zipOpen2_64 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_def* pzlib_filefunc_def) +{ + if (pzlib_filefunc_def != NULL) + { + zlib_filefunc64_32_def zlib_filefunc64_32_def_fill; + zlib_filefunc64_32_def_fill.zfile_func64 = *pzlib_filefunc_def; + zlib_filefunc64_32_def_fill.ztell32_file = NULL; + zlib_filefunc64_32_def_fill.zseek32_file = NULL; + return zipOpen3(pathname, append, globalcomment, &zlib_filefunc64_32_def_fill); + } + else + return zipOpen3(pathname, append, globalcomment, NULL); +} + + + +extern zipFile ZEXPORT zipOpen (const char* pathname, int append) +{ + return zipOpen3((const void*)pathname,append,NULL,NULL); +} + +extern zipFile ZEXPORT zipOpen64 (const void* pathname, int append) +{ + return zipOpen3(pathname,append,NULL,NULL); +} + +int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt size_extrafield_local, const void* extrafield_local) +{ + /* write the local header */ + int err; + uInt size_filename = (uInt)strlen(filename); + uInt size_extrafield = size_extrafield_local; + + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)LOCALHEADERMAGIC, 4); + + if (err==ZIP_OK) + { + if(zi->ci.zip64) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)45,2);/* version needed to extract */ + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)20,2);/* version needed to extract */ + } + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.flag,2); + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.method,2); + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->ci.dosDate,4); + + 
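+  /* Note: together with the fields written above this forms the fixed 30-byte
+     PKZIP local file header; it is followed by the filename, the caller's
+     local extra field and, when zip64 is enabled, a 20-byte Zip64 extended
+     information block whose position is remembered so the real sizes can be
+     patched in later by zipCloseFileInZipRaw64(). */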
/* CRC / Compressed size / Uncompressed size will be filled in later and rewritten later */ + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* crc 32, unknown */ + if (err==ZIP_OK) + { + if(zi->ci.zip64) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0xFFFFFFFF,4); /* compressed size, unknown */ + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* compressed size, unknown */ + } + if (err==ZIP_OK) + { + if(zi->ci.zip64) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0xFFFFFFFF,4); /* uncompressed size, unknown */ + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); /* uncompressed size, unknown */ + } + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_filename,2); + + if(zi->ci.zip64) + { + size_extrafield += 20; + } + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_extrafield,2); + + if ((err==ZIP_OK) && (size_filename > 0)) + { + if (ZWRITE64(zi->z_filefunc,zi->filestream,filename,size_filename)!=size_filename) + err = ZIP_ERRNO; + } + + if ((err==ZIP_OK) && (size_extrafield_local > 0)) + { + if (ZWRITE64(zi->z_filefunc, zi->filestream, extrafield_local, size_extrafield_local) != size_extrafield_local) + err = ZIP_ERRNO; + } + + + if ((err==ZIP_OK) && (zi->ci.zip64)) + { + /* write the Zip64 extended info */ + short HeaderID = 1; + short DataSize = 16; + ZPOS64_T CompressedSize = 0; + ZPOS64_T UncompressedSize = 0; + + /* Remember position of Zip64 extended info for the local file header. (needed when we update size after done with file) */ + zi->ci.pos_zip64extrainfo = ZTELL64(zi->z_filefunc,zi->filestream); + + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (short)HeaderID,2); + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (short)DataSize,2); + + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (ZPOS64_T)UncompressedSize,8); + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (ZPOS64_T)CompressedSize,8); + } + + return err; +} + +/* + NOTE. + When writing RAW the ZIP64 extended information in extrafield_local and extrafield_global needs to be stripped + before calling this function it can be done with zipRemoveExtraInfoBlock + + It is not done here because then we need to realloc a new buffer since parameters are 'const' and I want to minimize + unnecessary allocations. 
+ */ +extern int ZEXPORT zipOpenNewFileInZip4_64 (zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting, + uLong versionMadeBy, uLong flagBase, int zip64) +{ + zip64_internal* zi; + uInt size_filename; + uInt size_comment; + uInt i; + int err = ZIP_OK; + +# ifdef NOCRYPT + (crcForCrypting); + if (password != NULL) + return ZIP_PARAMERROR; +# endif + + if (file == NULL) + return ZIP_PARAMERROR; + +#ifdef HAVE_BZIP2 + if ((method!=0) && (method!=Z_DEFLATED) && (method!=Z_BZIP2ED)) + return ZIP_PARAMERROR; +#else + if ((method!=0) && (method!=Z_DEFLATED)) + return ZIP_PARAMERROR; +#endif + + zi = (zip64_internal*)file; + + if (zi->in_opened_file_inzip == 1) + { + err = zipCloseFileInZip (file); + if (err != ZIP_OK) + return err; + } + + if (filename==NULL) + filename="-"; + + if (comment==NULL) + size_comment = 0; + else + size_comment = (uInt)strlen(comment); + + size_filename = (uInt)strlen(filename); + + if (zipfi == NULL) + zi->ci.dosDate = 0; + else + { + if (zipfi->dosDate != 0) + zi->ci.dosDate = zipfi->dosDate; + else + zi->ci.dosDate = zip64local_TmzDateToDosDate(&zipfi->tmz_date); + } + + zi->ci.flag = flagBase; + if ((level==8) || (level==9)) + zi->ci.flag |= 2; + if (level==2) + zi->ci.flag |= 4; + if (level==1) + zi->ci.flag |= 6; + if (password != NULL) + zi->ci.flag |= 1; + + zi->ci.crc32 = 0; + zi->ci.method = method; + zi->ci.encrypt = 0; + zi->ci.stream_initialised = 0; + zi->ci.pos_in_buffered_data = 0; + zi->ci.raw = raw; + zi->ci.pos_local_header = ZTELL64(zi->z_filefunc,zi->filestream); + + zi->ci.size_centralheader = SIZECENTRALHEADER + size_filename + size_extrafield_global + size_comment; + zi->ci.size_centralExtraFree = 32; /* Extra space we have reserved in case we need to add ZIP64 extra info data */ + + zi->ci.central_header = (char*)ALLOC((uInt)zi->ci.size_centralheader + zi->ci.size_centralExtraFree); + + zi->ci.size_centralExtra = size_extrafield_global; + zip64local_putValue_inmemory(zi->ci.central_header,(uLong)CENTRALHEADERMAGIC,4); + /* version info */ + zip64local_putValue_inmemory(zi->ci.central_header+4,(uLong)versionMadeBy,2); + zip64local_putValue_inmemory(zi->ci.central_header+6,(uLong)20,2); + zip64local_putValue_inmemory(zi->ci.central_header+8,(uLong)zi->ci.flag,2); + zip64local_putValue_inmemory(zi->ci.central_header+10,(uLong)zi->ci.method,2); + zip64local_putValue_inmemory(zi->ci.central_header+12,(uLong)zi->ci.dosDate,4); + zip64local_putValue_inmemory(zi->ci.central_header+16,(uLong)0,4); /*crc*/ + zip64local_putValue_inmemory(zi->ci.central_header+20,(uLong)0,4); /*compr size*/ + zip64local_putValue_inmemory(zi->ci.central_header+24,(uLong)0,4); /*uncompr size*/ + zip64local_putValue_inmemory(zi->ci.central_header+28,(uLong)size_filename,2); + zip64local_putValue_inmemory(zi->ci.central_header+30,(uLong)size_extrafield_global,2); + zip64local_putValue_inmemory(zi->ci.central_header+32,(uLong)size_comment,2); + zip64local_putValue_inmemory(zi->ci.central_header+34,(uLong)0,2); /*disk nm start*/ + + if (zipfi==NULL) + zip64local_putValue_inmemory(zi->ci.central_header+36,(uLong)0,2); + else + zip64local_putValue_inmemory(zi->ci.central_header+36,(uLong)zipfi->internal_fa,2); + + if (zipfi==NULL) + zip64local_putValue_inmemory(zi->ci.central_header+38,(uLong)0,4); + else + 
zip64local_putValue_inmemory(zi->ci.central_header+38,(uLong)zipfi->external_fa,4);
+
+    if(zi->ci.pos_local_header >= 0xffffffff)
+      zip64local_putValue_inmemory(zi->ci.central_header+42,(uLong)0xffffffff,4);
+    else
+      zip64local_putValue_inmemory(zi->ci.central_header+42,(uLong)zi->ci.pos_local_header - zi->add_position_when_writing_offset,4);
+
+    for (i=0;i<size_filename;i++)
+        *(zi->ci.central_header+SIZECENTRALHEADER+i) = *(filename+i);
+
+    for (i=0;i<size_extrafield_global;i++)
+        *(zi->ci.central_header+SIZECENTRALHEADER+size_filename+i) =
+              *(((const char*)extrafield_global)+i);
+
+    for (i=0;i<size_comment;i++)
+        *(zi->ci.central_header+SIZECENTRALHEADER+size_filename+
+              size_extrafield_global+i) = *(comment+i);
+    if (zi->ci.central_header == NULL)
+        return ZIP_INTERNALERROR;
+
+    zi->ci.zip64 = zip64;
+    zi->ci.totalCompressedData = 0;
+    zi->ci.totalUncompressedData = 0;
+    zi->ci.pos_zip64extrainfo = 0;
+
+    err = Write_LocalFileHeader(zi, filename, size_extrafield_local, extrafield_local);
+
+#ifdef HAVE_BZIP2
+    zi->ci.bstream.avail_in = (uInt)0;
+    zi->ci.bstream.avail_out = (uInt)Z_BUFSIZE;
+    zi->ci.bstream.next_out = (char*)zi->ci.buffered_data;
+    zi->ci.bstream.total_in_hi32 = 0;
+    zi->ci.bstream.total_in_lo32 = 0;
+    zi->ci.bstream.total_out_hi32 = 0;
+    zi->ci.bstream.total_out_lo32 = 0;
+#endif
+
+    zi->ci.stream.avail_in = (uInt)0;
+    zi->ci.stream.avail_out = (uInt)Z_BUFSIZE;
+    zi->ci.stream.next_out = zi->ci.buffered_data;
+    zi->ci.stream.total_in = 0;
+    zi->ci.stream.total_out = 0;
+    zi->ci.stream.data_type = Z_BINARY;
+
+#ifdef HAVE_BZIP2
+    if ((err==ZIP_OK) && (zi->ci.method == Z_DEFLATED || zi->ci.method == Z_BZIP2ED) && (!zi->ci.raw))
+#else
+    if ((err==ZIP_OK) && (zi->ci.method == Z_DEFLATED) && (!zi->ci.raw))
+#endif
+    {
+        if(zi->ci.method == Z_DEFLATED)
+        {
+          zi->ci.stream.zalloc = (alloc_func)0;
+          zi->ci.stream.zfree = (free_func)0;
+          zi->ci.stream.opaque = (voidpf)0;
+
+          if (windowBits>0)
+              windowBits = -windowBits;
+
+          err = deflateInit2(&zi->ci.stream, level, Z_DEFLATED, windowBits, memLevel, strategy);
+
+          if (err==Z_OK)
+              zi->ci.stream_initialised = Z_DEFLATED;
+        }
+        else if(zi->ci.method == Z_BZIP2ED)
+        {
+#ifdef HAVE_BZIP2
+            /* Init BZip stuff here */
+          zi->ci.bstream.bzalloc = 0;
+          zi->ci.bstream.bzfree = 0;
+          zi->ci.bstream.opaque = (voidpf)0;
+
+          err = BZ2_bzCompressInit(&zi->ci.bstream, level, 0,35);
+          if(err == BZ_OK)
+            zi->ci.stream_initialised = Z_BZIP2ED;
+#endif
+        }
+
+    }
+
+# ifndef NOCRYPT
+    zi->ci.crypt_header_size = 0;
+    if ((err==Z_OK) && (password != NULL))
+    {
+        unsigned char bufHead[RAND_HEAD_LEN];
+        unsigned int sizeHead;
+        zi->ci.encrypt = 1;
+        zi->ci.pcrc_32_tab = get_crc_table();
+        /*init_keys(password,zi->ci.keys,zi->ci.pcrc_32_tab);*/
+
+        sizeHead=crypthead(password,bufHead,RAND_HEAD_LEN,zi->ci.keys,zi->ci.pcrc_32_tab,crcForCrypting);
+        zi->ci.crypt_header_size = sizeHead;
+
+        if (ZWRITE64(zi->z_filefunc,zi->filestream,bufHead,sizeHead) != sizeHead)
+                err = ZIP_ERRNO;
+    }
+# endif
+
+    if (err==Z_OK)
+        zi->in_opened_file_inzip = 1;
+    return err;
+}
+
+extern int ZEXPORT zipOpenNewFileInZip4 (zipFile file, const char* filename, const zip_fileinfo* zipfi,
+                                         const void* extrafield_local, uInt size_extrafield_local,
+                                         const void* extrafield_global, uInt size_extrafield_global,
+                                         const char* comment, int method, int level, int raw,
+                                         int windowBits,int memLevel, int strategy,
+                                         const char* password, uLong crcForCrypting,
+                                         uLong versionMadeBy, uLong flagBase)
+{
+    return zipOpenNewFileInZip4_64 (file, filename, zipfi,
+                                    extrafield_local, size_extrafield_local,
+                                    extrafield_global, size_extrafield_global,
+                                    comment, method, level, raw,
windowBits, memLevel, strategy, + password, crcForCrypting, versionMadeBy, flagBase, 0); +} + +extern int ZEXPORT zipOpenNewFileInZip3 (zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + windowBits, memLevel, strategy, + password, crcForCrypting, VERSIONMADEBY, 0, 0); +} + +extern int ZEXPORT zipOpenNewFileInZip3_64(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting, int zip64) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + windowBits, memLevel, strategy, + password, crcForCrypting, VERSIONMADEBY, 0, zip64); +} + +extern int ZEXPORT zipOpenNewFileInZip2(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, 0); +} + +extern int ZEXPORT zipOpenNewFileInZip2_64(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, int zip64) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, zip64); +} + +extern int ZEXPORT zipOpenNewFileInZip64 (zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void*extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int zip64) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, 0, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, zip64); +} + +extern int ZEXPORT zipOpenNewFileInZip (zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void*extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level) +{ + return zipOpenNewFileInZip4_64 (file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, 0, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, 
+                                    NULL, 0, VERSIONMADEBY, 0, 0);
+}
+
+local int zip64FlushWriteBuffer(zip64_internal* zi)
+{
+    int err=ZIP_OK;
+
+    if (zi->ci.encrypt != 0)
+    {
+#ifndef NOCRYPT
+        uInt i;
+        int t;
+        for (i=0;i<zi->ci.pos_in_buffered_data;i++)
+            zi->ci.buffered_data[i] = zencode(zi->ci.keys, zi->ci.pcrc_32_tab, zi->ci.buffered_data[i],t);
+#endif
+    }
+
+    if (ZWRITE64(zi->z_filefunc,zi->filestream,zi->ci.buffered_data,zi->ci.pos_in_buffered_data) != zi->ci.pos_in_buffered_data)
+      err = ZIP_ERRNO;
+
+    zi->ci.totalCompressedData += zi->ci.pos_in_buffered_data;
+
+#ifdef HAVE_BZIP2
+    if(zi->ci.method == Z_BZIP2ED)
+    {
+      zi->ci.totalUncompressedData += zi->ci.bstream.total_in_lo32;
+      zi->ci.bstream.total_in_lo32 = 0;
+      zi->ci.bstream.total_in_hi32 = 0;
+    }
+    else
+#endif
+    {
+      zi->ci.totalUncompressedData += zi->ci.stream.total_in;
+      zi->ci.stream.total_in = 0;
+    }
+
+
+    zi->ci.pos_in_buffered_data = 0;
+
+    return err;
+}
+
+extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned int len)
+{
+    zip64_internal* zi;
+    int err=ZIP_OK;
+
+    if (file == NULL)
+        return ZIP_PARAMERROR;
+    zi = (zip64_internal*)file;
+
+    if (zi->in_opened_file_inzip == 0)
+        return ZIP_PARAMERROR;
+
+    zi->ci.crc32 = crc32(zi->ci.crc32,buf,(uInt)len);
+
+#ifdef HAVE_BZIP2
+    if(zi->ci.method == Z_BZIP2ED && (!zi->ci.raw))
+    {
+      zi->ci.bstream.next_in = (void*)buf;
+      zi->ci.bstream.avail_in = len;
+      err = BZ_RUN_OK;
+
+      while ((err==BZ_RUN_OK) && (zi->ci.bstream.avail_in>0))
+      {
+        if (zi->ci.bstream.avail_out == 0)
+        {
+          if (zip64FlushWriteBuffer(zi) == ZIP_ERRNO)
+            err = ZIP_ERRNO;
+          zi->ci.bstream.avail_out = (uInt)Z_BUFSIZE;
+          zi->ci.bstream.next_out = (char*)zi->ci.buffered_data;
+        }
+
+
+        if(err != BZ_RUN_OK)
+          break;
+
+        if ((zi->ci.method == Z_BZIP2ED) && (!zi->ci.raw))
+        {
+          uLong uTotalOutBefore_lo = zi->ci.bstream.total_out_lo32;
+/*          uLong uTotalOutBefore_hi = zi->ci.bstream.total_out_hi32; */
+          err=BZ2_bzCompress(&zi->ci.bstream, BZ_RUN);
+
+          zi->ci.pos_in_buffered_data += (uInt)(zi->ci.bstream.total_out_lo32 - uTotalOutBefore_lo) ;
+        }
+      }
+
+      if(err == BZ_RUN_OK)
+        err = ZIP_OK;
+    }
+    else
+#endif
+    {
+      zi->ci.stream.next_in = (Bytef*)buf;
+      zi->ci.stream.avail_in = len;
+
+      while ((err==ZIP_OK) && (zi->ci.stream.avail_in>0))
+      {
+          if (zi->ci.stream.avail_out == 0)
+          {
+              if (zip64FlushWriteBuffer(zi) == ZIP_ERRNO)
+                  err = ZIP_ERRNO;
+              zi->ci.stream.avail_out = (uInt)Z_BUFSIZE;
+              zi->ci.stream.next_out = zi->ci.buffered_data;
+          }
+
+
+          if(err != ZIP_OK)
+              break;
+
+          if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw))
+          {
+              uLong uTotalOutBefore = zi->ci.stream.total_out;
+              err=deflate(&zi->ci.stream, Z_NO_FLUSH);
+              if(uTotalOutBefore > zi->ci.stream.total_out)
+              {
+                int bBreak = 0;
+                bBreak++;
+              }
+
+              zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ;
+          }
+          else
+          {
+              uInt copy_this,i;
+              if (zi->ci.stream.avail_in < zi->ci.stream.avail_out)
+                  copy_this = zi->ci.stream.avail_in;
+              else
+                  copy_this = zi->ci.stream.avail_out;
+
+              for (i = 0; i < copy_this; i++)
+                  *(((char*)zi->ci.stream.next_out)+i) =
+                      *(((const char*)zi->ci.stream.next_in)+i);
+              {
+                  zi->ci.stream.avail_in -= copy_this;
+                  zi->ci.stream.avail_out-= copy_this;
+                  zi->ci.stream.next_in+= copy_this;
+                  zi->ci.stream.next_out+= copy_this;
+                  zi->ci.stream.total_in+= copy_this;
+                  zi->ci.stream.total_out+= copy_this;
+                  zi->ci.pos_in_buffered_data += copy_this;
+              }
+          }
+      }/* while(...)
*/ + } + + return err; +} + +extern int ZEXPORT zipCloseFileInZipRaw (zipFile file, uLong uncompressed_size, uLong crc32) +{ + return zipCloseFileInZipRaw64 (file, uncompressed_size, crc32); +} + +extern int ZEXPORT zipCloseFileInZipRaw64 (zipFile file, ZPOS64_T uncompressed_size, uLong crc32) +{ + zip64_internal* zi; + ZPOS64_T compressed_size; + uLong invalidValue = 0xffffffff; + short datasize = 0; + int err=ZIP_OK; + + if (file == NULL) + return ZIP_PARAMERROR; + zi = (zip64_internal*)file; + + if (zi->in_opened_file_inzip == 0) + return ZIP_PARAMERROR; + zi->ci.stream.avail_in = 0; + + if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw)) + { + while (err==ZIP_OK) + { + uLong uTotalOutBefore; + if (zi->ci.stream.avail_out == 0) + { + if (zip64FlushWriteBuffer(zi) == ZIP_ERRNO) + err = ZIP_ERRNO; + zi->ci.stream.avail_out = (uInt)Z_BUFSIZE; + zi->ci.stream.next_out = zi->ci.buffered_data; + } + uTotalOutBefore = zi->ci.stream.total_out; + err=deflate(&zi->ci.stream, Z_FINISH); + zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ; + } + } + else if ((zi->ci.method == Z_BZIP2ED) && (!zi->ci.raw)) + { +#ifdef HAVE_BZIP2 + err = BZ_FINISH_OK; + while (err==BZ_FINISH_OK) + { + uLong uTotalOutBefore; + if (zi->ci.bstream.avail_out == 0) + { + if (zip64FlushWriteBuffer(zi) == ZIP_ERRNO) + err = ZIP_ERRNO; + zi->ci.bstream.avail_out = (uInt)Z_BUFSIZE; + zi->ci.bstream.next_out = (char*)zi->ci.buffered_data; + } + uTotalOutBefore = zi->ci.bstream.total_out_lo32; + err=BZ2_bzCompress(&zi->ci.bstream, BZ_FINISH); + if(err == BZ_STREAM_END) + err = Z_STREAM_END; + + zi->ci.pos_in_buffered_data += (uInt)(zi->ci.bstream.total_out_lo32 - uTotalOutBefore); + } + + if(err == BZ_FINISH_OK) + err = ZIP_OK; +#endif + } + + if (err==Z_STREAM_END) + err=ZIP_OK; /* this is normal */ + + if ((zi->ci.pos_in_buffered_data>0) && (err==ZIP_OK)) + { + if (zip64FlushWriteBuffer(zi)==ZIP_ERRNO) + err = ZIP_ERRNO; + } + + if ((zi->ci.method == Z_DEFLATED) && (!zi->ci.raw)) + { + int tmp_err = deflateEnd(&zi->ci.stream); + if (err == ZIP_OK) + err = tmp_err; + zi->ci.stream_initialised = 0; + } +#ifdef HAVE_BZIP2 + else if((zi->ci.method == Z_BZIP2ED) && (!zi->ci.raw)) + { + int tmperr = BZ2_bzCompressEnd(&zi->ci.bstream); + if (err==ZIP_OK) + err = tmperr; + zi->ci.stream_initialised = 0; + } +#endif + + if (!zi->ci.raw) + { + crc32 = (uLong)zi->ci.crc32; + uncompressed_size = zi->ci.totalUncompressedData; + } + compressed_size = zi->ci.totalCompressedData; + +# ifndef NOCRYPT + compressed_size += zi->ci.crypt_header_size; +# endif + + /* update Current Item crc and sizes, */ + if(compressed_size >= 0xffffffff || uncompressed_size >= 0xffffffff || zi->ci.pos_local_header >= 0xffffffff) + { + /*version Made by*/ + zip64local_putValue_inmemory(zi->ci.central_header+4,(uLong)45,2); + /*version needed*/ + zip64local_putValue_inmemory(zi->ci.central_header+6,(uLong)45,2); + + } + + zip64local_putValue_inmemory(zi->ci.central_header+16,crc32,4); /*crc*/ + + + if(compressed_size >= 0xffffffff) + zip64local_putValue_inmemory(zi->ci.central_header+20, invalidValue,4); /*compr size*/ + else + zip64local_putValue_inmemory(zi->ci.central_header+20, compressed_size,4); /*compr size*/ + + /* set internal file attributes field */ + if (zi->ci.stream.data_type == Z_ASCII) + zip64local_putValue_inmemory(zi->ci.central_header+36,(uLong)Z_ASCII,2); + + if(uncompressed_size >= 0xffffffff) + zip64local_putValue_inmemory(zi->ci.central_header+24, invalidValue,4); /*uncompr size*/ + else + 
zip64local_putValue_inmemory(zi->ci.central_header+24, uncompressed_size,4); /*uncompr size*/ + + /* Add ZIP64 extra info field for uncompressed size */ + if(uncompressed_size >= 0xffffffff) + datasize += 8; + + /* Add ZIP64 extra info field for compressed size */ + if(compressed_size >= 0xffffffff) + datasize += 8; + + /* Add ZIP64 extra info field for relative offset to local file header of current file */ + if(zi->ci.pos_local_header >= 0xffffffff) + datasize += 8; + + if(datasize > 0) + { + char* p = NULL; + + if((uLong)(datasize + 4) > zi->ci.size_centralExtraFree) + { + /* we can not write more data to the buffer that we have room for. */ + return ZIP_BADZIPFILE; + } + + p = zi->ci.central_header + zi->ci.size_centralheader; + + /* Add Extra Information Header for 'ZIP64 information' */ + zip64local_putValue_inmemory(p, 0x0001, 2); /* HeaderID */ + p += 2; + zip64local_putValue_inmemory(p, datasize, 2); /* DataSize */ + p += 2; + + if(uncompressed_size >= 0xffffffff) + { + zip64local_putValue_inmemory(p, uncompressed_size, 8); + p += 8; + } + + if(compressed_size >= 0xffffffff) + { + zip64local_putValue_inmemory(p, compressed_size, 8); + p += 8; + } + + if(zi->ci.pos_local_header >= 0xffffffff) + { + zip64local_putValue_inmemory(p, zi->ci.pos_local_header, 8); + p += 8; + } + + /* Update how much extra free space we got in the memory buffer + and increase the centralheader size so the new ZIP64 fields are included + ( 4 below is the size of HeaderID and DataSize field ) */ + zi->ci.size_centralExtraFree -= datasize + 4; + zi->ci.size_centralheader += datasize + 4; + + /* Update the extra info size field */ + zi->ci.size_centralExtra += datasize + 4; + zip64local_putValue_inmemory(zi->ci.central_header+30,(uLong)zi->ci.size_centralExtra,2); + } + + if (err==ZIP_OK) + err = add_data_in_datablock(&zi->central_dir, zi->ci.central_header, (uLong)zi->ci.size_centralheader); + + free(zi->ci.central_header); + + if (err==ZIP_OK) + { + /* Update the LocalFileHeader with the new values. */ + + ZPOS64_T cur_pos_inzip = ZTELL64(zi->z_filefunc,zi->filestream); + + if (ZSEEK64(zi->z_filefunc,zi->filestream, zi->ci.pos_local_header + 14,ZLIB_FILEFUNC_SEEK_SET)!=0) + err = ZIP_ERRNO; + + if (err==ZIP_OK) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,crc32,4); /* crc 32, unknown */ + + if(uncompressed_size >= 0xffffffff || compressed_size >= 0xffffffff ) + { + if(zi->ci.pos_zip64extrainfo > 0) + { + /* Update the size in the ZIP64 extended field. 
*/ + if (ZSEEK64(zi->z_filefunc,zi->filestream, zi->ci.pos_zip64extrainfo + 4,ZLIB_FILEFUNC_SEEK_SET)!=0) + err = ZIP_ERRNO; + + if (err==ZIP_OK) /* compressed size, unknown */ + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, uncompressed_size, 8); + + if (err==ZIP_OK) /* uncompressed size, unknown */ + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, compressed_size, 8); + } + else + err = ZIP_BADZIPFILE; /* Caller passed zip64 = 0, so no room for zip64 info -> fatal */ + } + else + { + if (err==ZIP_OK) /* compressed size, unknown */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,compressed_size,4); + + if (err==ZIP_OK) /* uncompressed size, unknown */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,uncompressed_size,4); + } + + if (ZSEEK64(zi->z_filefunc,zi->filestream, cur_pos_inzip,ZLIB_FILEFUNC_SEEK_SET)!=0) + err = ZIP_ERRNO; + } + + zi->number_entry ++; + zi->in_opened_file_inzip = 0; + + return err; +} + +extern int ZEXPORT zipCloseFileInZip (zipFile file) +{ + return zipCloseFileInZipRaw (file,0,0); +} + +int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eocd_pos_inzip) +{ + int err = ZIP_OK; + ZPOS64_T pos = zip64eocd_pos_inzip - zi->add_position_when_writing_offset; + + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)ZIP64ENDLOCHEADERMAGIC,4); + + /*num disks*/ + if (err==ZIP_OK) /* number of the disk with the start of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); + + /*relative offset*/ + if (err==ZIP_OK) /* Relative offset to the Zip64EndOfCentralDirectory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream, pos,8); + + /*total disks*/ /* Do not support spawning of disk so always say 1 here*/ + if (err==ZIP_OK) /* number of the disk with the start of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)1,4); + + return err; +} + +int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) +{ + int err = ZIP_OK; + + uLong Zip64DataSize = 44; + + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)ZIP64ENDHEADERMAGIC,4); + + if (err==ZIP_OK) /* size of this 'zip64 end of central directory' */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(ZPOS64_T)Zip64DataSize,8); /* why ZPOS64_T of this ? 
*/ + + if (err==ZIP_OK) /* version made by */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)45,2); + + if (err==ZIP_OK) /* version needed */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)45,2); + + if (err==ZIP_OK) /* number of this disk */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); + + if (err==ZIP_OK) /* number of the disk with the start of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,4); + + if (err==ZIP_OK) /* total number of entries in the central dir on this disk */ + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, zi->number_entry, 8); + + if (err==ZIP_OK) /* total number of entries in the central dir */ + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, zi->number_entry, 8); + + if (err==ZIP_OK) /* size of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(ZPOS64_T)size_centraldir,8); + + if (err==ZIP_OK) /* offset of start of central directory with respect to the starting disk number */ + { + ZPOS64_T pos = centraldir_pos_inzip - zi->add_position_when_writing_offset; + err = zip64local_putValue(&zi->z_filefunc,zi->filestream, (ZPOS64_T)pos,8); + } + return err; +} +int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) +{ + int err = ZIP_OK; + + /*signature*/ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)ENDHEADERMAGIC,4); + + if (err==ZIP_OK) /* number of this disk */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,2); + + if (err==ZIP_OK) /* number of the disk with the start of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0,2); + + if (err==ZIP_OK) /* total number of entries in the central dir on this disk */ + { + { + if(zi->number_entry >= 0xFFFF) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0xffff,2); /* use value in ZIP64 record */ + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->number_entry,2); + } + } + + if (err==ZIP_OK) /* total number of entries in the central dir */ + { + if(zi->number_entry >= 0xFFFF) + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)0xffff,2); /* use value in ZIP64 record */ + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)zi->number_entry,2); + } + + if (err==ZIP_OK) /* size of the central directory */ + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_centraldir,4); + + if (err==ZIP_OK) /* offset of start of central directory with respect to the starting disk number */ + { + ZPOS64_T pos = centraldir_pos_inzip - zi->add_position_when_writing_offset; + if(pos >= 0xffffffff) + { + err = zip64local_putValue(&zi->z_filefunc,zi->filestream, (uLong)0xffffffff,4); + } + else + err = zip64local_putValue(&zi->z_filefunc,zi->filestream, (uLong)(centraldir_pos_inzip - zi->add_position_when_writing_offset),4); + } + + return err; +} + +int Write_GlobalComment(zip64_internal* zi, const char* global_comment) +{ + int err = ZIP_OK; + uInt size_global_comment = 0; + + if(global_comment != NULL) + size_global_comment = (uInt)strlen(global_comment); + + err = zip64local_putValue(&zi->z_filefunc,zi->filestream,(uLong)size_global_comment,2); + + if (err == ZIP_OK && size_global_comment > 0) + { + if (ZWRITE64(zi->z_filefunc,zi->filestream, global_comment, size_global_comment) != size_global_comment) + err = ZIP_ERRNO; + } + return err; +} + +extern 
int ZEXPORT zipClose (zipFile file, const char* global_comment) +{ + zip64_internal* zi; + int err = 0; + uLong size_centraldir = 0; + ZPOS64_T centraldir_pos_inzip; + ZPOS64_T pos; + + if (file == NULL) + return ZIP_PARAMERROR; + + zi = (zip64_internal*)file; + + if (zi->in_opened_file_inzip == 1) + { + err = zipCloseFileInZip (file); + } + +#ifndef NO_ADDFILEINEXISTINGZIP + if (global_comment==NULL) + global_comment = zi->globalcomment; +#endif + + centraldir_pos_inzip = ZTELL64(zi->z_filefunc,zi->filestream); + + if (err==ZIP_OK) + { + linkedlist_datablock_internal* ldi = zi->central_dir.first_block; + while (ldi!=NULL) + { + if ((err==ZIP_OK) && (ldi->filled_in_this_block>0)) + { + if (ZWRITE64(zi->z_filefunc,zi->filestream, ldi->data, ldi->filled_in_this_block) != ldi->filled_in_this_block) + err = ZIP_ERRNO; + } + + size_centraldir += ldi->filled_in_this_block; + ldi = ldi->next_datablock; + } + } + free_linkedlist(&(zi->central_dir)); + + pos = centraldir_pos_inzip - zi->add_position_when_writing_offset; + if(pos >= 0xffffffff || zi->number_entry > 0xFFFF) + { + ZPOS64_T Zip64EOCDpos = ZTELL64(zi->z_filefunc,zi->filestream); + Write_Zip64EndOfCentralDirectoryRecord(zi, size_centraldir, centraldir_pos_inzip); + + Write_Zip64EndOfCentralDirectoryLocator(zi, Zip64EOCDpos); + } + + if (err==ZIP_OK) + err = Write_EndOfCentralDirectoryRecord(zi, size_centraldir, centraldir_pos_inzip); + + if(err == ZIP_OK) + err = Write_GlobalComment(zi, global_comment); + + if (ZCLOSE64(zi->z_filefunc,zi->filestream) != 0) + if (err == ZIP_OK) + err = ZIP_ERRNO; + +#ifndef NO_ADDFILEINEXISTINGZIP + TRYFREE(zi->globalcomment); +#endif + TRYFREE(zi); + + return err; +} + +extern int ZEXPORT zipRemoveExtraInfoBlock (char* pData, int* dataLen, short sHeader) +{ + char* p = pData; + int size = 0; + char* pNewHeader; + char* pTmp; + short header; + short dataSize; + + int retVal = ZIP_OK; + + if(pData == NULL || *dataLen < 4) + return ZIP_PARAMERROR; + + pNewHeader = (char*)ALLOC(*dataLen); + pTmp = pNewHeader; + + while(p < (pData + *dataLen)) + { + header = *(short*)p; + dataSize = *(((short*)p)+1); + + if( header == sHeader ) /* Header found. */ + { + p += dataSize + 4; /* skip it. do not copy to temp buffer */ + } + else + { + /* Extra Info block should not be removed, So copy it to the temp buffer. */ + memcpy(pTmp, p, dataSize + 4); + p += dataSize + 4; + size += dataSize + 4; + } + + } + + if(size < *dataLen) + { + /* clean old extra info block. 
*/ + memset(pData,0, *dataLen); + + /* copy the new extra info block over the old */ + if(size > 0) + memcpy(pData, pNewHeader, size); + + /* set the new extra info size */ + *dataLen = size; + + retVal = ZIP_OK; + } + else + retVal = ZIP_ERRNO; + + TRYFREE(pNewHeader); + + return retVal; +} diff --git a/src/third_party/minizip/zip.h b/src/third_party/minizip/zip.h new file mode 100644 index 000000000..b49750ec2 --- /dev/null +++ b/src/third_party/minizip/zip.h @@ -0,0 +1,362 @@ +/* zip.h -- IO on .zip files using zlib + Version 1.1, February 14h, 2010 + part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) + + Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) + + Modifications for Zip64 support + Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) + + For more info read MiniZip_info.txt + + --------------------------------------------------------------------------- + + Condition of use and distribution are the same than zlib : + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. 
+ + --------------------------------------------------------------------------- + + Changes + + See header of zip.h + +*/ + +#ifndef _zip12_H +#define _zip12_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* #define HAVE_BZIP2 */ + +#ifndef _ZLIB_H +#include "zlib.h" +#endif + +#ifndef _ZLIBIOAPI_H +#include "ioapi.h" +#endif + +#ifdef HAVE_BZIP2 +#include "bzlib.h" +#endif + +#define Z_BZIP2ED 12 + +#if defined(STRICTZIP) || defined(STRICTZIPUNZIP) +/* like the STRICT of WIN32, we define a pointer that cannot be converted + from (void*) without cast */ +typedef struct TagzipFile__ { int unused; } zipFile__; +typedef zipFile__ *zipFile; +#else +typedef voidp zipFile; +#endif + +#define ZIP_OK (0) +#define ZIP_EOF (0) +#define ZIP_ERRNO (Z_ERRNO) +#define ZIP_PARAMERROR (-102) +#define ZIP_BADZIPFILE (-103) +#define ZIP_INTERNALERROR (-104) + +#ifndef DEF_MEM_LEVEL +# if MAX_MEM_LEVEL >= 8 +# define DEF_MEM_LEVEL 8 +# else +# define DEF_MEM_LEVEL MAX_MEM_LEVEL +# endif +#endif +/* default memLevel */ + +/* tm_zip contain date/time info */ +typedef struct tm_zip_s +{ + uInt tm_sec; /* seconds after the minute - [0,59] */ + uInt tm_min; /* minutes after the hour - [0,59] */ + uInt tm_hour; /* hours since midnight - [0,23] */ + uInt tm_mday; /* day of the month - [1,31] */ + uInt tm_mon; /* months since January - [0,11] */ + uInt tm_year; /* years - [1980..2044] */ +} tm_zip; + +typedef struct +{ + tm_zip tmz_date; /* date in understandable format */ + uLong dosDate; /* if dos_date == 0, tmu_date is used */ +/* uLong flag; */ /* general purpose bit flag 2 bytes */ + + uLong internal_fa; /* internal file attributes 2 bytes */ + uLong external_fa; /* external file attributes 4 bytes */ +} zip_fileinfo; + +typedef const char* zipcharpc; + + +#define APPEND_STATUS_CREATE (0) +#define APPEND_STATUS_CREATEAFTER (1) +#define APPEND_STATUS_ADDINZIP (2) + +extern zipFile ZEXPORT zipOpen OF((const char *pathname, int append)); +extern zipFile ZEXPORT zipOpen64 OF((const void *pathname, int append)); +/* + Create a zipfile. + pathname contain on Windows XP a filename like "c:\\zlib\\zlib113.zip" or on + an Unix computer "zlib/zlib113.zip". + if the file pathname exist and append==APPEND_STATUS_CREATEAFTER, the zip + will be created at the end of the file. + (useful if the file contain a self extractor code) + if the file pathname exist and append==APPEND_STATUS_ADDINZIP, we will + add files in existing zip (be sure you don't add file that doesn't exist) + If the zipfile cannot be opened, the return value is NULL. + Else, the return value is a zipFile Handle, usable with other function + of this zip package. +*/ + +/* Note : there is no delete function into a zipfile. 
+ If you want delete file into a zipfile, you must open a zipfile, and create another + Of couse, you can use RAW reading and writing to copy the file you did not want delte +*/ + +extern zipFile ZEXPORT zipOpen2 OF((const char *pathname, + int append, + zipcharpc* globalcomment, + zlib_filefunc_def* pzlib_filefunc_def)); + +extern zipFile ZEXPORT zipOpen2_64 OF((const void *pathname, + int append, + zipcharpc* globalcomment, + zlib_filefunc64_def* pzlib_filefunc_def)); + +extern int ZEXPORT zipOpenNewFileInZip OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level)); + +extern int ZEXPORT zipOpenNewFileInZip64 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int zip64)); + +/* + Open a file in the ZIP for writing. + filename : the filename in zip (if NULL, '-' without quote will be used + *zipfi contain supplemental information + if extrafield_local!=NULL and size_extrafield_local>0, extrafield_local + contains the extrafield data the the local header + if extrafield_global!=NULL and size_extrafield_global>0, extrafield_global + contains the extrafield data the the local header + if comment != NULL, comment contain the comment string + method contain the compression method (0 for store, Z_DEFLATED for deflate) + level contain the level of compression (can be Z_DEFAULT_COMPRESSION) + zip64 is set to 1 if a zip64 extended information block should be added to the local file header. + this MUST be '1' if the uncompressed size is >= 0xffffffff. 
+ +*/ + + +extern int ZEXPORT zipOpenNewFileInZip2 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw)); + + +extern int ZEXPORT zipOpenNewFileInZip2_64 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int zip64)); +/* + Same than zipOpenNewFileInZip, except if raw=1, we write raw file + */ + +extern int ZEXPORT zipOpenNewFileInZip3 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting)); + +extern int ZEXPORT zipOpenNewFileInZip3_64 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting, + int zip64 + )); + +/* + Same than zipOpenNewFileInZip2, except + windowBits,memLevel,,strategy : see parameter strategy in deflateInit2 + password : crypting password (NULL for no crypting) + crcForCrypting : crc of file to compress (needed for crypting) + */ + +extern int ZEXPORT zipOpenNewFileInZip4 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting, + uLong versionMadeBy, + uLong flagBase + )); + + +extern int ZEXPORT zipOpenNewFileInZip4_64 OF((zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting, + uLong versionMadeBy, + uLong flagBase, + int zip64 + )); +/* + Same than zipOpenNewFileInZip4, except + versionMadeBy : value for Version made by field + flag : value for flag field (compression level info will be added) + */ + + +extern int ZEXPORT zipWriteInFileInZip OF((zipFile file, + const void* buf, + unsigned len)); +/* + Write data in the zipfile +*/ + +extern int ZEXPORT zipCloseFileInZip OF((zipFile file)); +/* + Close the current file in the zipfile +*/ + +extern int ZEXPORT zipCloseFileInZipRaw OF((zipFile file, + uLong uncompressed_size, + uLong crc32)); + +extern int ZEXPORT zipCloseFileInZipRaw64 OF((zipFile file, + ZPOS64_T uncompressed_size, + uLong crc32)); + +/* + Close the current file in the zipfile, for file opened with + parameter raw=1 in zipOpenNewFileInZip2 + uncompressed_size and crc32 are value for the uncompressed size +*/ + +extern int ZEXPORT zipClose OF((zipFile file, + const char* 
global_comment)); +/* + Close the zipfile +*/ + + +extern int ZEXPORT zipRemoveExtraInfoBlock OF((char* pData, int* dataLen, short sHeader)); +/* + zipRemoveExtraInfoBlock - Added by Mathias Svensson + + Remove extra information block from a extra information data for the local file header or central directory header + + It is needed to remove ZIP64 extra information blocks when before data is written if using RAW mode. + + 0x0001 is the signature header for the ZIP64 extra information blocks + + usage. + Remove ZIP64 Extra information from a central director extra field data + zipRemoveExtraInfoBlock(pCenDirExtraFieldData, &nCenDirExtraFieldDataLen, 0x0001); + + Remove ZIP64 Extra information from a Local File Header extra field data + zipRemoveExtraInfoBlock(pLocalHeaderExtraFieldData, &nLocalHeaderExtraFieldDataLen, 0x0001); +*/ + +#ifdef __cplusplus +} +#endif + +#endif /* _zip64_H */ diff --git a/src/third_party/rapidjson/rapidjson/document.h b/src/third_party/rapidjson/rapidjson/document.h index 5e8380302..efaf5552e 100644 --- a/src/third_party/rapidjson/rapidjson/document.h +++ b/src/third_party/rapidjson/rapidjson/document.h @@ -30,6 +30,7 @@ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(padded) RAPIDJSON_DIAG_OFF(switch-enum) RAPIDJSON_DIAG_OFF(c++98-compat) +RAPIDJSON_DIAG_OFF(conversion) #elif defined(_MSC_VER) RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data diff --git a/src/third_party/rapidjson/rapidjson/encodings.h b/src/third_party/rapidjson/rapidjson/encodings.h index 0b2446795..2e65dd6eb 100644 --- a/src/third_party/rapidjson/rapidjson/encodings.h +++ b/src/third_party/rapidjson/rapidjson/encodings.h @@ -17,11 +17,16 @@ #include "rapidjson.h" -#if defined(_MSC_VER) && !defined(__clang__) +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(conversion) +#elif defined(_MSC_VER) RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data RAPIDJSON_DIAG_OFF(4702) // unreachable code -#elif defined(__GNUC__) +#endif + +#ifdef __GNUC__ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(effc++) RAPIDJSON_DIAG_OFF(overflow) @@ -709,7 +714,11 @@ struct Transcoder { RAPIDJSON_NAMESPACE_END -#if defined(__GNUC__) || (defined(_MSC_VER) && !defined(__clang__)) +#if defined(__clang__) || defined(_MSC_VER) +RAPIDJSON_DIAG_POP +#endif + +#ifdef __GNUC__ RAPIDJSON_DIAG_POP #endif diff --git a/src/third_party/rapidjson/rapidjson/reader.h b/src/third_party/rapidjson/rapidjson/reader.h index 44a6bcd30..dc6b6d661 100644 --- a/src/third_party/rapidjson/rapidjson/reader.h +++ b/src/third_party/rapidjson/rapidjson/reader.h @@ -42,6 +42,7 @@ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(old-style-cast) RAPIDJSON_DIAG_OFF(padded) RAPIDJSON_DIAG_OFF(switch-enum) +RAPIDJSON_DIAG_OFF(conversion) #elif defined(_MSC_VER) RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant diff --git a/src/token_aware_policy.cpp b/src/token_aware_policy.cpp index bc86dc8ca..cc2cd8394 100644 --- a/src/token_aware_policy.cpp +++ b/src/token_aware_policy.cpp @@ -36,7 +36,8 @@ static inline bool contains(const CopyOnWriteHostVec& replicas, const Address& a return false; } -void TokenAwarePolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random) { +void TokenAwarePolicy::init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc) { if (random != NULL) { if (shuffle_replicas_) { // Store random so 
that it can be used to shuffle replicas. @@ -47,7 +48,7 @@ void TokenAwarePolicy::init(const Host::Ptr& connected_host, const HostMap& host index_ = random->next(std::max(static_cast(1), hosts.size())); } } - ChainedLoadBalancingPolicy::init(connected_host, hosts, random); + ChainedLoadBalancingPolicy::init(connected_host, hosts, random, local_dc); } QueryPlan* TokenAwarePolicy::new_query_plan(const String& keyspace, RequestHandler* request_handler, diff --git a/src/token_aware_policy.hpp b/src/token_aware_policy.hpp index 9f4ff0d6f..5a8cee903 100644 --- a/src/token_aware_policy.hpp +++ b/src/token_aware_policy.hpp @@ -34,7 +34,8 @@ class TokenAwarePolicy : public ChainedLoadBalancingPolicy { virtual ~TokenAwarePolicy() {} - virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random); + virtual void init(const Host::Ptr& connected_host, const HostMap& hosts, Random* random, + const String& local_dc); virtual QueryPlan* new_query_plan(const String& keyspace, RequestHandler* request_handler, const TokenMap* token_map); diff --git a/src/token_map_impl.hpp b/src/token_map_impl.hpp index 39fe4f6a2..b4af34692 100644 --- a/src/token_map_impl.hpp +++ b/src/token_map_impl.hpp @@ -55,18 +55,27 @@ struct equal_to { } }; -} // namespace std - -namespace datastax { namespace internal { namespace core { +#if defined(HASH_IN_TR1) && !defined(_WIN32) +namespace tr1 { +#endif -struct HostHash { - std::size_t operator()(const Host::Ptr& host) const { +template <> +struct hash { + std::size_t operator()(const datastax::internal::core::Host::Ptr& host) const { if (!host) return 0; - return hash(host->address()); + return hasher(host->address()); } - AddressHash hash; + SPARSEHASH_HASH hasher; }; +#if defined(HASH_IN_TR1) && !defined(_WIN32) +} // namespace tr1 +#endif + +} // namespace std + +namespace datastax { namespace internal { namespace core { + class IdGenerator { public: typedef DenseHashMap IdMap; @@ -134,7 +143,7 @@ class ByteOrderedPartitioner { static StringRef name() { return "ByteOrderedPartitioner"; } }; -class HostSet : public DenseHashSet { +class HostSet : public DenseHashSet { public: HostSet() { set_empty_key(Host::Ptr(new Host(Address::EMPTY_KEY))); diff --git a/src/utils.cpp b/src/utils.cpp index fea00a33d..2e105c302 100644 --- a/src/utils.cpp +++ b/src/utils.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #if (defined(WIN32) || defined(_WIN32)) #include diff --git a/src/uuids.cpp b/src/uuids.cpp index c46333192..1bd0d3bd3 100644 --- a/src/uuids.cpp +++ b/src/uuids.cpp @@ -148,7 +148,7 @@ CassError cass_uuid_from_string_n(const char* str, size_t str_length, CassUuid* if (hex_to_half_byte[p0] == -1 || hex_to_half_byte[p1] == -1) { return CASS_ERROR_LIB_BAD_PARAMS; } - buf[i] = (hex_to_half_byte[p0] << 4) + hex_to_half_byte[p1]; + buf[i] = static_cast(hex_to_half_byte[p0] << 4) + hex_to_half_byte[p1]; pos += 2; } diff --git a/src/wait_for_handler.cpp b/src/wait_for_handler.cpp index a5c5d58b4..ed6624c18 100644 --- a/src/wait_for_handler.cpp +++ b/src/wait_for_handler.cpp @@ -127,7 +127,7 @@ void WaitForHandler::on_retry_timeout(Timer* timer) { finish(); } else if (connection_->write_and_flush(callback(requests_)) == core::Request::REQUEST_ERROR_NO_AVAILABLE_STREAM_IDS) { - on_error(WaitForHandler::WAIT_FOR_ERROR_NO_STREAMS, "Connection closed"); + on_error(WaitForHandler::WAIT_FOR_ERROR_NO_STREAMS, "No streams available"); finish(); } } diff --git a/src/wait_for_handler.hpp b/src/wait_for_handler.hpp index e9454f6d2..aaad7954c 100644 --- 
a/src/wait_for_handler.hpp +++ b/src/wait_for_handler.hpp @@ -89,8 +89,7 @@ class WaitForHandler : public RefCounted { virtual void on_error(WaitForError code, const String& message) = 0; protected: - const Address& address() const { return connection_->address(); } - const String& address_string() const { return connection_->address_string(); } + const Host::Ptr& host() const { return connection_->host(); } const Response::Ptr& response() const { return response_; } diff --git a/src/whitelist_policy.cpp b/src/whitelist_policy.cpp index c6531e558..42c567b51 100644 --- a/src/whitelist_policy.cpp +++ b/src/whitelist_policy.cpp @@ -19,7 +19,7 @@ using namespace datastax::internal::core; bool WhitelistPolicy::is_valid_host(const Host::Ptr& host) const { - const String& host_address = host->address().to_string(false); + const String& host_address = host->address().hostname_or_address(); for (ContactPointList::const_iterator it = hosts_.begin(), end = hosts_.end(); it != end; ++it) { if (host_address.compare(*it) == 0) { return true; diff --git a/test/ccm_bridge/data/config.txt b/test/ccm_bridge/data/config.txt index 24b5b55e9..e77715ab8 100644 --- a/test/ccm_bridge/data/config.txt +++ b/test/ccm_bridge/data/config.txt @@ -52,8 +52,8 @@ #VERBOSE=true ############################################################################### -# DSE Options # -# Ensure USE_DSE=true to enable these options # +# DSE/DDAC Options # +# Ensure USE_DSE=true or USE_DDAC=true to enable these options # ############################################################################### ## # Flag to determine if DSE version should be loaded @@ -62,15 +62,21 @@ ## #USE_DSE=false ## -# DSE Deployment Version +# Flag to determine if DDAC version should be loaded # -# DSE version to deploy using CCM +# Uncomment to specify use of DDAC +## +#USE_DDAC=false +## +# DSE/DDAC Deployment Version +# +# DSE/DDAC version to deploy using CCM # -# Uncomment to specify DSE version +# Uncomment to specify DSE/DDAC version ## -#DSE_VERSION=6.0.8 +#DSE_VERSION=6.7.5 ## -# CCM DSE Credentials Type (username_password|ini_file) +# CCM DSE/DDAC Credentials Type (username_password|ini_file) # # Setting to indicate how DSE download through CCM should authenticate access # @@ -78,7 +84,7 @@ ## #DSE_CREDENTIALS_TYPE=username_password ## -# DSE Username (Username) +# DSE/DDAC Username (Username) # # Username for authenticating DSE download access # @@ -86,7 +92,7 @@ ## #DSE_USERNAME= ## -# DSE Password (Password) +# DSE/DDAC Password (Password) # # Password for authenticating DSE download access # diff --git a/test/ccm_bridge/src/bridge.cpp b/test/ccm_bridge/src/bridge.cpp index 1cf4758f1..cb2c0f20c 100644 --- a/test/ccm_bridge/src/bridge.cpp +++ b/test/ccm_bridge/src/bridge.cpp @@ -95,6 +95,7 @@ #define CCM_CONFIGURATION_KEY_DEPLOYMENT_TYPE "deployment_type" #define CCM_CONFIGURATION_KEY_VERBOSE "verbose" #define CCM_CONFIGURATION_KEY_USE_DSE "use_dse" +#define CCM_CONFIGURATION_KEY_USE_DDAC "use_ddac" #define CCM_CONFIGURATION_KEY_DSE_VERSION "dse_version" #define CCM_CONFIGURATION_KEY_DSE_CREDENTIALS_TYPE "dse_credentials_type" #define CCM_CONFIGURATION_KEY_DSE_USERNAME "dse_username" @@ -122,7 +123,7 @@ using namespace CCM; CCM::Bridge::Bridge( CassVersion server_version /*= DEFAULT_CASSANDRA_VERSION*/, bool use_git /*= DEFAULT_USE_GIT*/, const std::string& branch_tag /* ""*/, bool use_install_dir /*=DEFAULT_USE_INSTALL_DIR*/, - const std::string& install_dir /*=""*/, bool use_dse /*= DEFAULT_USE_DSE*/, + const std::string& install_dir /*=""*/, 
ServerType server_type /*= DEFAULT_SERVER_TYPE*/, std::vector dse_workload /*= DEFAULT_DSE_WORKLOAD*/, const std::string& cluster_prefix /*= DEFAULT_CLUSTER_PREFIX*/, DseCredentialsType dse_credentials_type /*= DEFAULT_DSE_CREDENTIALS*/, @@ -139,7 +140,7 @@ CCM::Bridge::Bridge( , branch_tag_(branch_tag) , use_install_dir_(use_install_dir) , install_dir_(install_dir) - , use_dse_(use_dse) + , server_type_(server_type) , dse_workload_(dse_workload) , cluster_prefix_(cluster_prefix) , authentication_type_(authentication_type) @@ -164,8 +165,8 @@ CCM::Bridge::Bridge( _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF); #endif #endif - // Determine if DSE is being used - if (use_dse_) { + // Determine if DSE/DDAC is being used + if (!is_cassandra()) { dse_version_ = DseVersion(server_version.to_string()); cassandra_version_ = dse_version_.get_cass_version(); } @@ -200,7 +201,7 @@ CCM::Bridge::Bridge(const std::string& configuration_file) , dse_version_(DEFAULT_DSE_VERSION) , use_git_(DEFAULT_USE_GIT) , use_install_dir_(DEFAULT_USE_INSTALL_DIR) - , use_dse_(DEFAULT_USE_DSE) + , server_type_(DEFAULT_SERVER_TYPE) , dse_workload_(DEFAULT_DSE_WORKLOAD) , cluster_prefix_(DEFAULT_CLUSTER_PREFIX) , authentication_type_(DEFAULT_AUTHENTICATION) @@ -273,23 +274,38 @@ CCM::Bridge::Bridge(const std::string& configuration_file) } else if (key.compare(CCM_CONFIGURATION_KEY_USE_DSE) == 0) { // Convert the value std::stringstream ss(value); - if (!(ss >> std::boolalpha >> use_dse_).fail()) { + bool use_dse = false; + if (!(ss >> std::boolalpha >> use_dse).fail()) { + if (use_dse) server_type_ = ServerType::DSE; continue; } else { LOG_ERROR("Invalid flag \"" << value << "\" for " << CCM_CONFIGURATION_KEY_USE_DSE - << "; defaulting to \"" - << (DEFAULT_USE_DSE ? "true" : "false") << "\""); - use_dse_ = DEFAULT_USE_DSE; + << "; defaulting to \"" << DEFAULT_SERVER_TYPE.name() + << "\""); + server_type_ = DEFAULT_SERVER_TYPE; + } + } else if (key.compare(CCM_CONFIGURATION_KEY_USE_DDAC) == 0) { + // Convert the value + std::stringstream ss(value); + bool use_ddac = false; + if (!(ss >> std::boolalpha >> use_ddac).fail()) { + if (use_ddac) server_type_ = ServerType::DDAC; + continue; + } else { + LOG_ERROR("Invalid flag \"" << value << "\" for " << CCM_CONFIGURATION_KEY_USE_DDAC + << "; defaulting to \"" << DEFAULT_SERVER_TYPE.name() + << "\""); + server_type_ = DEFAULT_SERVER_TYPE; } } else if (key.compare(CCM_CONFIGURATION_KEY_DSE_CREDENTIALS_TYPE) == 0) { - // Determine the DSE credentials type + // Determine the DSE/DDAC credentials type for (DseCredentialsType::iterator iterator = DseCredentialsType::begin(); iterator != DseCredentialsType::end(); ++iterator) { if (*iterator == value) { dse_credentials_type_ = *iterator; break; } else { - LOG_ERROR("Invalid DSE credential type \"" << value << "\""); + LOG_ERROR("Invalid DSE/DDAC credential type \"" << value << "\""); } } } else if (key.compare(CCM_CONFIGURATION_KEY_DSE_USERNAME) == 0) { @@ -368,8 +384,8 @@ CCM::Bridge::Bridge(const std::string& configuration_file) << "\"; defaults will be used"); } - // Determine if DSE is being used - if (use_dse_) { + // Determine if DSE/DDAC is being used + if (!is_cassandra()) { cassandra_version_ = dse_version_.get_cass_version(); } @@ -381,8 +397,8 @@ CCM::Bridge::Bridge(const std::string& configuration_file) // Display the configuration settings being used LOG("Host: " << host_); LOG("Cassandra Version: " << cassandra_version_.to_string()); - if (use_dse_) { - LOG("DSE Version: " << dse_version_.to_string()); + if 
(!is_cassandra()) { + LOG(server_type_.to_string() << " Version: " << dse_version_.to_string()); } if (use_git_ && !branch_tag_.empty()) { LOG(" Branch/Tag: " << branch_tag_); @@ -546,7 +562,7 @@ bool CCM::Bridge::create_cluster(std::vector data_center_nodes, is_password_authenticator, is_ssl, is_client_authentication); for (std::vector::iterator iterator = dse_workload_.begin(); iterator != dse_workload_.end(); ++iterator) { - if (use_dse_ && *iterator != DSE_WORKLOAD_CASSANDRA) { + if (is_dse() && *iterator != DSE_WORKLOAD_CASSANDRA) { cluster_name.append("-").append(dse_workloads_[*iterator]); } } @@ -563,34 +579,36 @@ bool CCM::Bridge::create_cluster(std::vector data_center_nodes, create_command.push_back("--install-dir=" + install_dir_); } else { create_command.push_back("-v"); - if (use_dse_) { + if (is_cassandra()) { if (use_git_) { if (branch_tag_.empty()) { - create_command.push_back("git:" + dse_version_.to_string()); + create_command.push_back("git:cassandra-" + cassandra_version_.to_string()); } else { create_command.push_back("git:" + branch_tag_); } } else { - create_command.push_back(dse_version_.to_string()); - } - if (dse_credentials_type_ == DseCredentialsType::USERNAME_PASSWORD) { - create_command.push_back("--dse-username=" + dse_username_); - create_command.push_back("--dse-password=" + dse_password_); + create_command.push_back(cassandra_version_.to_string()); } } else { if (use_git_) { if (branch_tag_.empty()) { - create_command.push_back("git:cassandra-" + cassandra_version_.to_string()); + create_command.push_back("git:" + dse_version_.to_string()); } else { create_command.push_back("git:" + branch_tag_); } } else { - create_command.push_back(cassandra_version_.to_string()); + create_command.push_back(dse_version_.to_string()); + } + if (dse_credentials_type_ == DseCredentialsType::USERNAME_PASSWORD) { + create_command.push_back("--dse-username=" + dse_username_); + create_command.push_back("--dse-password=" + dse_password_); } } } - if (use_dse_) { + if (is_dse()) { create_command.push_back("--dse"); + } else if (is_ddac()) { + create_command.push_back("--ddac"); } create_command.push_back("-b"); @@ -614,7 +632,7 @@ bool CCM::Bridge::create_cluster(std::vector data_center_nodes, // Generate the cluster update configuration command and execute execute_ccm_command(generate_create_updateconf_command(cassandra_version_)); - if (dse_version_ >= "6.7.0") { + if (is_dse() && dse_version_ >= "6.7.0") { update_cluster_configuration("user_defined_function_fail_micros", "5000000"); } @@ -639,7 +657,7 @@ bool CCM::Bridge::create_cluster(std::vector data_center_nodes, } // Set the DSE workload (if applicable) - if (use_dse_ && !(dse_workload_.size() == 1 && dse_workload_[0] == DSE_WORKLOAD_CASSANDRA)) { + if (is_dse() && !(dse_workload_.size() == 1 && dse_workload_[0] == DSE_WORKLOAD_CASSANDRA)) { set_dse_workloads(dse_workload_); } } @@ -888,7 +906,7 @@ unsigned int CCM::Bridge::add_node(const std::string& data_center /*= ""*/) { add_node_command.push_back("-d"); add_node_command.push_back(data_center); } - if (use_dse_) { + if (is_dse()) { add_node_command.push_back("--dse"); } add_node_command.push_back(generate_node_name(node)); @@ -917,8 +935,8 @@ bool CCM::Bridge::decommission_node(unsigned int node, bool is_force /*= false*/ std::vector decommission_node_command; decommission_node_command.push_back(generate_node_name(node)); decommission_node_command.push_back("decommission"); - if (is_force && ((!use_dse_ && cassandra_version_ >= "3.12") || // Cassandra v3.12+ - 
(use_dse_ && dse_version_ >= "5.1.0"))) { // DataStax Enterprise v5.1.0+ + if (is_force && ((is_cassandra() && cassandra_version_ >= "3.12") || // Cassandra v3.12+ + (!is_cassandra() && dse_version_ >= "5.1.0"))) { // DataStax Enterprise v5.1.0+ decommission_node_command.push_back("--force"); } execute_ccm_command(decommission_node_command); @@ -1186,7 +1204,7 @@ DseVersion CCM::Bridge::get_dse_version() { } // Unable to determine version information from active cluster - throw BridgeException("Unable to determine version information from active DSE cluster \"" + + throw BridgeException("Unable to determine version information from active DSE/DDAC cluster \"" + get_active_cluster() + "\""); } @@ -1214,7 +1232,7 @@ DseVersion CCM::Bridge::get_dse_version(const std::string& configuration_file) { } } - // Return the DSE version + // Return the DSE/DDAC version return dse_version; } @@ -1698,9 +1716,11 @@ std::string CCM::Bridge::execute_ccm_command(const std::vector& com std::string output; if (deployment_type_ == DeploymentType::LOCAL) { #ifdef _WIN32 - if (use_dse_) { - throw BridgeException("DSE v" + dse_version_.to_string() + - " cannot be launched on Windows platform"); + if (!is_cassandra()) { + std::stringstream message; + message << server_type_.to_string() << " v" << dse_version_.to_string() + << " cannot be launched on Windows platform"; + throw BridgeException(message.str()); } #endif utils::Process::Result result = utils::Process::execute(ccm_command); @@ -1756,7 +1776,7 @@ std::string CCM::Bridge::generate_cluster_name(CassVersion cassandra_version, bool is_ssl, bool is_client_authentication) { std::stringstream cluster_name; std::string server_version = - use_dse_ ? dse_version_.to_string(false) : cassandra_version.to_string(false); + !is_cassandra() ? 
dse_version_.to_string(false) : cassandra_version.to_string(false); std::replace(server_version.begin(), server_version.end(), '.', '-'); cluster_name << cluster_prefix_ << "_" << server_version << "_" << generate_cluster_nodes(data_center_nodes, '-'); @@ -1794,8 +1814,8 @@ CCM::Bridge::generate_create_updateconf_command(CassVersion cassandra_version) { // Create the update configuration command (common updates) std::vector updateconf_command; updateconf_command.push_back("updateconf"); - // Disable optimizations (limits) when using DSE - if (!use_dse_) { + // Disable optimizations (limits) when using DSE/DDAC + if (is_cassandra()) { updateconf_command.push_back("--rt=10000"); updateconf_command.push_back("read_request_timeout_in_ms:10000"); updateconf_command.push_back("write_request_timeout_in_ms:10000"); diff --git a/test/ccm_bridge/src/bridge.hpp b/test/ccm_bridge/src/bridge.hpp index 1ae7aaa0d..5f190540d 100644 --- a/test/ccm_bridge/src/bridge.hpp +++ b/test/ccm_bridge/src/bridge.hpp @@ -23,6 +23,7 @@ #include "deployment_type.hpp" #include "dse_credentials_type.hpp" #include "process.hpp" +#include "server_type.hpp" #include "tsocket.hpp" #include @@ -45,10 +46,12 @@ typedef struct _LIBSSH2_CHANNEL LIBSSH2_CHANNEL; // Default values #define DEFAULT_CASSANDRA_VERSION CassVersion("3.11.4") -#define DEFAULT_DSE_VERSION DseVersion("6.0.8") +#define DEFAULT_DSE_VERSION DseVersion("6.7.5") #define DEFAULT_USE_GIT false #define DEFAULT_USE_INSTALL_DIR false +#define DEFAULT_SERVER_TYPE ServerType(ServerType::CASSANDRA) #define DEFAULT_USE_DSE false +#define DEFAULT_USE_DDAC false #define DEFAULT_CLUSTER_PREFIX "cpp-driver" #define DEFAULT_DSE_CREDENTIALS DseCredentialsType::USERNAME_PASSWORD #define DEFAULT_DEPLOYMENT DeploymentType::LOCAL @@ -158,8 +161,7 @@ class Bridge { * (default: DEAFAULT_USE_INSTALL_DIR) * @param install_dir Installation directory to use when use_install_dir is * enabled (default: Empty) - * @param use_dse True if CCM should load DSE for provided version; false - * otherwise (default: DEFAULT_USE_DSE) + * @param server_type Server type CCM should create (default: CASSANDRA) * @param dse_workload DSE workload to utilize * (default: DEFAULT_DSE_WORKLOAD) * @param cluster_prefix Prefix to use when creating a cluster name @@ -192,7 +194,7 @@ class Bridge { */ Bridge(CassVersion cassandra_version = DEFAULT_CASSANDRA_VERSION, bool use_git = DEFAULT_USE_GIT, const std::string& branch_tag = "", bool use_install_dir = DEFAULT_USE_INSTALL_DIR, - const std::string& install_dir = "", bool use_dse = DEFAULT_USE_DSE, + const std::string& install_dir = "", ServerType server_type = DEFAULT_SERVER_TYPE, std::vector dse_workload = DEFAULT_DSE_WORKLOAD, const std::string& cluster_prefix = DEFAULT_CLUSTER_PREFIX, DseCredentialsType dse_credentials_type = DEFAULT_DSE_CREDENTIALS, @@ -535,7 +537,26 @@ class Bridge { */ void execute_cql_on_node(unsigned int node, const std::string& cql); - CCM_BRIDGE_DEPRECATED(bool is_dse() { return use_dse_; }) + /** + * Determine if server type is Apache Cassandra + * + * @return True if Cassandra; false otherwise + */ + bool is_cassandra() { return server_type_ == ServerType::CASSANDRA; } + + /** + * Determine if server type is DataStax Enterprise + * + * @return True if DSE; false otherwise + */ + bool is_dse() { return server_type_ == ServerType::DSE; } + + /** + * Determine if server type is DataStax Distribution of Apache Cassandra + * + * @return True if DDAC; false otherwise + */ + bool is_ddac() { return server_type_ == ServerType::DDAC; } /** * 
Force decommission of a node on the active Cassandra cluster @@ -777,9 +798,9 @@ class Bridge { */ std::string install_dir_; /** - * Flag to determine if DSE is being used + * Server type to use with CCM */ - bool use_dse_; + ServerType server_type_; /** * Workload to apply to the DSE cluster * diff --git a/test/ccm_bridge/src/cass_version.hpp b/test/ccm_bridge/src/cass_version.hpp index c3f93eb55..a36949708 100644 --- a/test/ccm_bridge/src/cass_version.hpp +++ b/test/ccm_bridge/src/cass_version.hpp @@ -294,7 +294,7 @@ class DseVersion : public CassVersion { : CassVersion(version) {} /** - * Create the CassVersion from a human readable string + * Create the DseVersion from a human readable string * * @param version_string String representation to convert */ @@ -477,17 +477,26 @@ class DseVersion : public CassVersion { return CassVersion("3.11.3-5113"); } else if (*this == "5.1.14") { return CassVersion("3.11.3-5114"); - } else if (*this >= "5.1.15" && *this < "6.0.0") { - if (*this > "5.1.15") { + } else if (*this == "5.1.15") { + return CassVersion("3.11.4-5115"); + } else if (*this == "5.1.16") { + return CassVersion("3.11.4-5116"); + } else if (*this >= "5.1.17" && *this < "6.0.0") { + if (*this > "5.1.17") { std::cerr << "Cassandra Version is not Defined: " << "Add Cassandra version for DSE v" << this->to_string() << std::endl; } - return CassVersion("3.11.3-5115"); + return CassVersion("3.11.4-5117"); } else if (*this >= "6.0.0" && *this < "6.7.0") { return CassVersion( "3.11.2-5111"); // Versions before DSE 6.7 erroneously return they support Cassandra 4.0.0 } else if (*this >= "6.7.0" && *this < "7.0.0") { return CassVersion("4.0.0"); + + // DSE version does not correspond to a valid Cassandra version + std::cerr << "Cassandra Version is not Defined: " + << "Add Cassandra version for DSE v" << this->to_string() << std::endl; + return CassVersion("0.0.0"); } // DSE version does not correspond to a valid Cassandra version diff --git a/test/ccm_bridge/src/dse_credentials_type.hpp b/test/ccm_bridge/src/dse_credentials_type.hpp index e70f20c5b..307972ac0 100644 --- a/test/ccm_bridge/src/dse_credentials_type.hpp +++ b/test/ccm_bridge/src/dse_credentials_type.hpp @@ -23,7 +23,7 @@ namespace CCM { /** - * DSE credential stype indicating how authentication for DSE downloads is + * DSE credentials type indicating how authentication for DSE downloads is * performed through CCM */ class DseCredentialsType { diff --git a/test/ccm_bridge/src/server_type.hpp b/test/ccm_bridge/src/server_type.hpp new file mode 100644 index 000000000..c8925ccab --- /dev/null +++ b/test/ccm_bridge/src/server_type.hpp @@ -0,0 +1,74 @@ +/* + Copyright (c) DataStax, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef CCM_SERVER_TYPE_HPP +#define CCM_SERVER_TYPE_HPP + +#include + +namespace CCM { + +/** + * Server type to indicate which type of server to initialize and start using + * CCM + */ +class ServerType { +public: + enum Type { INVALID, CASSANDRA, DSE, DDAC }; + + ServerType(Type type = INVALID) + : type_(type) {} + + ServerType(const ServerType& server_type) + : type_(server_type.type_) {} + + const char* name() { + switch (type_) { + case CASSANDRA: + return "CASSANDRA"; + case DSE: + return "DSE"; + case DDAC: + return "DDAC"; + default: + return "INVALID"; + } + } + + const char* to_string() { + switch (type_) { + case CASSANDRA: + return "Apache Cassandra"; + case DSE: + return "DataStax Enterprise"; + case DDAC: + return "DataStax Distribution of Apache Cassandra"; + default: + return "Invalid Server Type"; + } + } + + bool operator==(const ServerType& other) const { return type_ == other.type_; } + + bool operator<(const ServerType& other) const { return type_ < other.type_; } + +private: + Type type_; +}; + +} // namespace CCM + +#endif // CCM_SERVER_TYPE_HPP diff --git a/test/integration_tests/CMakeLists.txt b/test/integration_tests/CMakeLists.txt index c4cf22719..2078b8c85 100644 --- a/test/integration_tests/CMakeLists.txt +++ b/test/integration_tests/CMakeLists.txt @@ -54,12 +54,6 @@ target_link_libraries(${PROJECT_INTEGRATION_TESTS_NAME} if(UNIX) target_link_libraries(${PROJECT_INTEGRATION_TESTS_NAME} pthread) # test_basics requires pthread endif() -set_property( - TARGET ${PROJECT_INTEGRATION_TESTS_NAME} - APPEND PROPERTY COMPILE_FLAGS ${CASS_TEST_CXX_FLAGS}) -set_property( - TARGET ${PROJECT_INTEGRATION_TESTS_NAME} - APPEND PROPERTY LINK_FLAGS ${PROJECT_CXX_LINKER_FLAGS}) if(BOOST_LIBRARY_NAME) add_dependencies(${PROJECT_INTEGRATION_TESTS_NAME} ${BOOST_LIBRARY_NAME}) endif() diff --git a/test/integration_tests/src/test_async.cpp b/test/integration_tests/src/test_async.cpp index 80d718b06..aa3b2c8c1 100644 --- a/test/integration_tests/src/test_async.cpp +++ b/test/integration_tests/src/test_async.cpp @@ -96,7 +96,7 @@ struct AsyncTests : public test_utils::SingleSessionTest { } }; -BOOST_FIXTURE_TEST_SUITE(async, AsyncTests) +BOOST_FIXTURE_TEST_SUITE_WITH_DECOR(async, AsyncTests, *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(simple) { std::string table_name = diff --git a/test/integration_tests/src/test_authentication.cpp b/test/integration_tests/src/test_authentication.cpp index ee0cdb131..2f43586cf 100644 --- a/test/integration_tests/src/test_authentication.cpp +++ b/test/integration_tests/src/test_authentication.cpp @@ -83,7 +83,8 @@ void on_auth_initial(CassAuthenticator* auth, void* data) { cass_authenticator_set_error(auth, NULL); } -BOOST_FIXTURE_TEST_SUITE(authentication, AuthenticationTests) +BOOST_FIXTURE_TEST_SUITE_WITH_DECOR(authentication, AuthenticationTests, + *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(protocol_versions) { auth(3); diff --git a/test/integration_tests/src/test_basics.cpp b/test/integration_tests/src/test_basics.cpp index 0109107cd..f82702ae9 100644 --- a/test/integration_tests/src/test_basics.cpp +++ b/test/integration_tests/src/test_basics.cpp @@ -372,7 +372,7 @@ struct BasicTests : public test_utils::SingleSessionTest { } }; -BOOST_FIXTURE_TEST_SUITE(basics, BasicTests) +BOOST_FIXTURE_TEST_SUITE_WITH_DECOR(basics, BasicTests, *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(basic_types) { if ((version.major_version >= 2 && version.minor_version >= 2) || version.major_version >= 3) { diff --git 
a/test/integration_tests/src/test_batch.cpp b/test/integration_tests/src/test_batch.cpp index 25683c4ff..0f54fa0b2 100644 --- a/test/integration_tests/src/test_batch.cpp +++ b/test/integration_tests/src/test_batch.cpp @@ -76,7 +76,7 @@ struct BatchTests : public test_utils::SingleSessionTest { const char* BatchTests::SIMPLE_TABLE_NAME = "simple_batch_testing_table"; const char* BatchTests::COUNTER_TABLE_NAME = "counter_batch_testing_table"; -BOOST_AUTO_TEST_SUITE(batch) +BOOST_AUTO_TEST_SUITE_WITH_DECOR(batch, *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(prepared) { CCM::CassVersion version = test_utils::get_version(); diff --git a/test/integration_tests/src/test_by_name.cpp b/test/integration_tests/src/test_by_name.cpp index f166a3701..e1ad171e3 100644 --- a/test/integration_tests/src/test_by_name.cpp +++ b/test/integration_tests/src/test_by_name.cpp @@ -186,7 +186,7 @@ struct ByNameTests : public test_utils::SingleSessionTest { } }; -BOOST_FIXTURE_TEST_SUITE(by_name, ByNameTests) +BOOST_FIXTURE_TEST_SUITE_WITH_DECOR(by_name, ByNameTests, *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(bind_and_get_prepared) { test_utils::CassPreparedPtr prepared = test_utils::prepare(session, INSERT_BY_NAME); diff --git a/test/integration_tests/src/test_config.cpp b/test/integration_tests/src/test_config.cpp index 2b2b18ae0..f73fd3663 100644 --- a/test/integration_tests/src/test_config.cpp +++ b/test/integration_tests/src/test_config.cpp @@ -26,7 +26,7 @@ struct ConfigTests { ConfigTests() {} }; -BOOST_FIXTURE_TEST_SUITE(config, ConfigTests) +BOOST_FIXTURE_TEST_SUITE_WITH_DECOR(config, ConfigTests, *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(options) { test_utils::CassClusterPtr cluster(cass_cluster_new()); diff --git a/test/integration_tests/src/test_consistency.cpp b/test/integration_tests/src/test_consistency.cpp index a632b1f2f..806f0f380 100644 --- a/test/integration_tests/src/test_consistency.cpp +++ b/test/integration_tests/src/test_consistency.cpp @@ -33,7 +33,7 @@ struct ConsistencyTests { , ip_prefix(ccm->get_ip_prefix()) {} }; -BOOST_FIXTURE_TEST_SUITE(consistency, ConsistencyTests) +BOOST_FIXTURE_TEST_SUITE_WITH_DECOR(consistency, ConsistencyTests, *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(simple_two_nodes) { test_utils::CassClusterPtr cluster(cass_cluster_new()); diff --git a/test/integration_tests/src/test_control_connection.cpp b/test/integration_tests/src/test_control_connection.cpp index 2ede30baf..00a8054a2 100644 --- a/test/integration_tests/src/test_control_connection.cpp +++ b/test/integration_tests/src/test_control_connection.cpp @@ -95,7 +95,8 @@ struct ControlConnectionTests { } }; -BOOST_FIXTURE_TEST_SUITE(control_connection, ControlConnectionTests) +BOOST_FIXTURE_TEST_SUITE_WITH_DECOR(control_connection, ControlConnectionTests, + *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(connect_invalid_ip) { test_utils::CassLog::reset("Unable to establish a control connection to host " diff --git a/test/integration_tests/src/test_custom_payload.cpp b/test/integration_tests/src/test_custom_payload.cpp index 2e0f44897..29b7023d1 100644 --- a/test/integration_tests/src/test_custom_payload.cpp +++ b/test/integration_tests/src/test_custom_payload.cpp @@ -24,7 +24,7 @@ #include #include -BOOST_AUTO_TEST_SUITE(custom_payload) +BOOST_AUTO_TEST_SUITE_WITH_DECOR(custom_payload, *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(simple) { CCM::CassVersion version = test_utils::get_version(); diff --git a/test/integration_tests/src/test_datatypes.cpp 
b/test/integration_tests/src/test_datatypes.cpp index d35cc810d..8ccb734cd 100644 --- a/test/integration_tests/src/test_datatypes.cpp +++ b/test/integration_tests/src/test_datatypes.cpp @@ -115,7 +115,7 @@ struct DataTypesTests : public test_utils::SingleSessionTest { } }; -BOOST_FIXTURE_TEST_SUITE(datatypes, DataTypesTests) +BOOST_FIXTURE_TEST_SUITE_WITH_DECOR(datatypes, DataTypesTests, *boost::unit_test::disabled()) /** * Read/Write Datatypes diff --git a/test/integration_tests/src/test_load_balancing.cpp b/test/integration_tests/src/test_load_balancing.cpp index 34bd47237..86656d89e 100644 --- a/test/integration_tests/src/test_load_balancing.cpp +++ b/test/integration_tests/src/test_load_balancing.cpp @@ -54,7 +54,8 @@ struct LoadBalancingTests { } }; -BOOST_FIXTURE_TEST_SUITE(load_balancing, LoadBalancingTests) +BOOST_FIXTURE_TEST_SUITE_WITH_DECOR(load_balancing, LoadBalancingTests, + *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(round_robin) { test_utils::CassClusterPtr cluster(cass_cluster_new()); diff --git a/test/integration_tests/src/test_pool.cpp b/test/integration_tests/src/test_pool.cpp index 79021b6f4..9022104b2 100644 --- a/test/integration_tests/src/test_pool.cpp +++ b/test/integration_tests/src/test_pool.cpp @@ -48,7 +48,7 @@ struct TestPool : public test_utils::MultipleNodesTest { } }; -BOOST_AUTO_TEST_SUITE(pool) +BOOST_AUTO_TEST_SUITE_WITH_DECOR(pool, *boost::unit_test::disabled()) BOOST_AUTO_TEST_CASE(connection_spawn) { TestPool tester; diff --git a/test/integration_tests/src/test_speculative_execution_policy.cpp b/test/integration_tests/src/test_speculative_execution_policy.cpp index d357af14d..8ab6ac3d3 100644 --- a/test/integration_tests/src/test_speculative_execution_policy.cpp +++ b/test/integration_tests/src/test_speculative_execution_policy.cpp @@ -144,7 +144,7 @@ struct TestSpeculativeExecutionPolicy : public test_utils::SingleSessionTest { Future* native_future = static_cast(future.get()); if (native_future->type() == Future::FUTURE_TYPE_RESPONSE) { ResponseFuture* native_response_future = static_cast(native_future); - host = native_response_future->address().to_string().c_str(); + host = native_response_future->address().hostname_or_address().c_str(); } return host; }
diff --git a/topics/cloud/README.md b/topics/cloud/README.md
new file mode 100644
index 000000000..7968e9d76
--- /dev/null
+++ b/topics/cloud/README.md
@@ -0,0 +1,50 @@
+# Cloud
+
+## Connecting to your [DataStax Apollo database on Constellation] using a secure connection bundle
+
+ **Note:** Both the C++ Driver for Apache Cassandra and the C++ Driver for DataStax
+ Enterprise (DSE) use the same code to connect and query your Cassandra database,
+ but when using the DSE driver use the header `#include <dse.h>`.
+
+ Use the following code snippet to connect to your database:
+
+ ```c
+ #include <cassandra.h> /* Use "#include <dse.h>" when using the C++ DSE Driver */
+ #include <stdio.h>
+
+ int main(int argc, char* argv[]) {
+   /* Setup and connect to cluster */
+   CassCluster* cluster = cass_cluster_new();
+   CassSession* session = cass_session_new();
+
+   /* Setup driver to connect to the cloud using the secure connection bundle */
+   const char* secure_connect_bundle = "/path/to/secure-connect-database_name.zip";
+   if (cass_cluster_set_cloud_secure_connection_bundle(cluster, secure_connect_bundle) != CASS_OK) {
+     fprintf(stderr, "Unable to configure cloud using the secure connection bundle: %s\n",
+             secure_connect_bundle);
+     return 1;
+   }
+
+   /* Set credentials provided when creating your database */
+   cass_cluster_set_credentials(cluster, "username", "password");
+
+   CassFuture* connect_future = cass_session_connect(session, cluster);
+
+   if (cass_future_error_code(connect_future) == CASS_OK) {
+     /* Use the session to run queries */
+   } else {
+     /* Handle error */
+   }
+
+   cass_future_free(connect_future);
+   cass_cluster_free(cluster);
+   cass_session_free(session);
+
+   return 0;
+ }
+ ```
+
+ **Note:** `cass_cluster_set_contact_points()` and `cass_cluster_set_ssl()` should not be used
+ in conjunction with `cass_cluster_set_cloud_secure_connection_bundle()`.
+
+[DataStax Apollo database on Constellation]: https://constellation.datastax.com/
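
The `/* Handle error */` branch in the README example above is left empty; in practice the reason a cloud connection failed can be read back from the connect future. Below is a minimal sketch of such a branch, assuming the same `connect_future` from the snippet above. The helper name `print_connect_error` is illustrative only, while `cass_future_error_code()`, `cass_future_error_message()`, and `cass_error_desc()` are the driver's standard future/error accessors.

```c
#include <cassandra.h> /* Or <dse.h> when using the C++ DSE Driver */
#include <stdio.h>

/* Illustrative helper (not part of the driver API): prints why a connect
 * future failed using the driver's future/error accessors. Intended to be
 * called from the "Handle error" branch with the `connect_future` above. */
static void print_connect_error(CassFuture* connect_future) {
  const char* message;
  size_t message_length;
  cass_future_error_message(connect_future, &message, &message_length);
  fprintf(stderr, "Unable to connect: '%.*s' (%s)\n", (int)message_length, message,
          cass_error_desc(cass_future_error_code(connect_future)));
}
```

Surfacing the error this way makes a misconfigured secure connection bundle or bad credentials immediately visible without changing the rest of the connection flow.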