diff --git a/.dockerignore b/.dockerignore index c4ab83c6e..919a15e1c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,3 +2,8 @@ cmake-build* integration_tests/external* Dockerfile .dockerignore +.gitignore +.git +appimage/Dockerfile.part +appimage/export.sh +appimage/Makefile diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..871d64ee3 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,223 @@ +name: Build + +on: + workflow_dispatch: + pull_request: + branches: + - develop + - release + push: + branches: + - develop + - release + +env: + # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) + BUILD_TYPE: Release + +jobs: + build: + # The CMake configure and build commands are platform agnostic and should work equally + # well on Windows or Mac. You can convert this to a matrix build if you need + # cross-platform coverage. + # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix + strategy: + matrix: + os: [ ubuntu-latest, macos-latest ] + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v2 + + - name: Create Build Environment + # Some projects don't allow in-source building, so create a separate build directory + # We'll use this as our working directory for all subsequent commands + run: cmake -E make_directory ${{github.workspace}}/build + + - name: Get Conan + # You may pin to the exact commit or the version. 
+ # uses: turtlebrowser/get-conan@4dc7e6dd45c8b1e02e909979d7cfc5ebba6ddbe2 + uses: turtlebrowser/get-conan@v1.0 + + - name: Conan profile and settings + run: | + conan profile new --detect default + conan config set general.revisions_enabled=1 + + - name: Conan profile (linux-workaround) + if: matrix.os == 'ubuntu-latest' + run: + conan profile update settings.compiler.libcxx=libstdc++11 default + + - name: Conan install (osx-workaround) + if: matrix.os == 'macos-latest' + working-directory: ${{github.workspace}}/build + run: | + conan remote add ns1labs-conan https://ns1labs.jfrog.io/artifactory/api/conan/ns1labs-conan + conan install --build=missing .. + + - name: linux package install + if: matrix.os == 'ubuntu-latest' + run: | + sudo apt-get update + sudo apt-get install --yes --no-install-recommends golang ca-certificates jq + + - name: Configure CMake + # Use a bash shell so we can use the same syntax for environment variable + # access regardless of the host operating system + shell: bash + working-directory: ${{github.workspace}}/build + # Note the current convention is to use the -S and -B options here to specify source + # and build directories, but this is only available with CMake 3.13 and higher. + # The CMake binaries on the Github Actions machines are (as of this writing) 3.12 + run: PKG_CONFIG_PATH=${{github.workspace}}/local/lib/pkgconfig cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE + + - name: Build + working-directory: ${{github.workspace}}/build + shell: bash + # Execute the build. You can specify a specific target with "--target " + run: cmake --build . --config $BUILD_TYPE -- -j 2 + + - name: Test + working-directory: ${{github.workspace}}/build + shell: bash + # Execute tests defined by the CMake configuration. 
+ # See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail + run: ctest -C $BUILD_TYPE + + package: + needs: build + runs-on: ubuntu-latest + # if this is a push into one of our main branches (rather than just a pull request), we will also package + if: github.event_name != 'pull_request' + + steps: + - uses: actions/checkout@v2 + + - name: Create Build Environment + run: cmake -E make_directory ${{github.workspace}}/build + + - name: Get Conan + uses: turtlebrowser/get-conan@v1.0 + + - name: Conan profile and settings + run: | + conan profile new --detect default + conan config set general.revisions_enabled=1 + conan profile update settings.compiler.libcxx=libstdc++11 default + + - name: Configure CMake to generate VERSION + shell: bash + working-directory: ${{github.workspace}}/build + run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE + + - name: Get branch name + shell: bash + run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | tr / -)" >> $GITHUB_ENV + + - name: Debug branch name + run: echo ${{ env.BRANCH_NAME }} + + - name: Get VERSION + run: | + echo "VERSION=`cat ${{github.workspace}}/build/VERSION`" >> $GITHUB_ENV + + - name: Debug version + run: echo ${{ env.VERSION }} + + - name: Generate ref tag (master) + if: ${{ env.BRANCH_NAME == 'master' }} + run: | + echo "REF_TAG=latest" >> $GITHUB_ENV + echo "PRERELEASE=false" >> $GITHUB_ENV + echo "DRAFT=true" >> $GITHUB_ENV + + - name: Generate ref tag (develop) + if: ${{ env.BRANCH_NAME == 'develop' }} + run: | + echo "REF_TAG=latest-develop" >> $GITHUB_ENV + echo "PRERELEASE=true" >> $GITHUB_ENV + echo "DRAFT=false" >> $GITHUB_ENV + + - name: Generate ref tag (release candidate) + if: ${{ env.BRANCH_NAME == 'release' }} + run: | + echo "REF_TAG=latest-rc" >> $GITHUB_ENV + echo "PRERELEASE=true" >> $GITHUB_ENV + echo "DRAFT=false" >> $GITHUB_ENV + + - name: Debug ref tag + run: echo ${{ env.REF_TAG }} + + - name: Manage Github ref tags + uses: actions/github-script@v3 + 
with: + github-token: ${{ github.token }} + # note deleteRef can't start with refs/, but create createRef does. + script: | + try { + await github.git.deleteRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: "tags/${{ env.REF_TAG }}", + }) + } catch (e) { + console.log("The tag doesn't exist yet: " + e) + } + await github.git.createRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: "refs/tags/${{ env.REF_TAG }}", + sha: context.sha + }) + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build + push VERSION - pktvisor + env: + IMAGE_NAME: ns1labs/pktvisor + run: | + docker build . --file docker/Dockerfile --tag ${{ env.IMAGE_NAME }}:${{ env.VERSION }} + docker push ${{ env.IMAGE_NAME }}:${{ env.VERSION }} + + - name: Tag + push docker image with ref tag (cached build) - pktvisor + env: + IMAGE_NAME: ns1labs/pktvisor + run: | + docker build . --file docker/Dockerfile --tag ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} + docker push ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} + + - name: Build + push VERSION - pktvisor-prom-write + env: + IMAGE_NAME: ns1labs/pktvisor-prom-write + working-directory: ${{github.workspace}}/centralized_collection/prometheus/docker-grafana-agent + run: | + docker build . --file Dockerfile --build-arg PKTVISOR_TAG=${{ env.REF_TAG }} --tag ${{ env.IMAGE_NAME }}:${{ env.VERSION }} + docker push ${{ env.IMAGE_NAME }}:${{ env.VERSION }} + + - name: Tag + push docker image with ref tag (cached build) - pktvisor-prom-write + env: + IMAGE_NAME: ns1labs/pktvisor-prom-write + working-directory: ${{github.workspace}}/centralized_collection/prometheus/docker-grafana-agent + run: | + docker build . 
--file Dockerfile --build-arg PKTVISOR_TAG=${{ env.REF_TAG }} --tag ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} + docker push ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} + + - name: Generate AppImage + env: + IMAGE_NAME: ns1labs/pktvisor + working-directory: ${{github.workspace}}/appimage + run: | + DEV_IMAGE="${{ env.IMAGE_NAME }}:${{ env.VERSION }}" DEV_MODE=t make pktvisor-x86_64.AppImage + mv pktvisor-x86_64.AppImage pktvisor-x86_64-${{ env.VERSION }}.AppImage + + - name: Upload AppImage artifact + uses: actions/upload-artifact@v2 + with: + name: pktvisor-x86_64-${{ env.VERSION }}.AppImage + path: ${{github.workspace}}/appimage/pktvisor-x86_64-${{ env.VERSION }}.AppImage + diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml deleted file mode 100644 index 8634bddc8..000000000 --- a/.github/workflows/cmake.yml +++ /dev/null @@ -1,112 +0,0 @@ -name: Build - -on: - pull_request: - branches: - - develop - - release/** - push: - branches: - - master - - develop - - release/** - -env: - # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) - BUILD_TYPE: RelWithDebInfo - -jobs: - build: - # The CMake configure and build commands are platform agnostic and should work equally - # well on Windows or Mac. You can convert this to a matrix build if you need - # cross-platform coverage. - # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix - strategy: - matrix: - os: [ ubuntu-latest, macos-latest ] - runs-on: ${{ matrix.os }} - - steps: - - uses: actions/checkout@v2 - - - name: Create Build Environment - # Some projects don't allow in-source building, so create a separate build directory - # We'll use this as our working directory for all subsequent commands - run: cmake -E make_directory ${{github.workspace}}/build - - - name: Get Conan - # You may pin to the exact commit or the version. 
- # uses: turtlebrowser/get-conan@4dc7e6dd45c8b1e02e909979d7cfc5ebba6ddbe2 - uses: turtlebrowser/get-conan@v1.0 - - - name: Conan profile - run: | - conan profile new --detect default - - - name: Conan profile (linux) - if: matrix.os == 'ubuntu-latest' - run: - conan profile update settings.compiler.libcxx=libstdc++11 default - - - name: Conan install - working-directory: ${{github.workspace}}/build - run: conan install .. - - - name: linux package install - if: matrix.os == 'ubuntu-latest' - run: | - sudo apt-get update - sudo apt-get install --yes --no-install-recommends libpcap-dev pkgconf golang ca-certificates libmaxminddb-dev jq - - - name: osx package install - if: matrix.os == 'macos-latest' - run: | - brew update - brew install libmaxminddb - - - name: PcapPlusPlus checkout - run: | - git clone https://github.com/ns1/PcapPlusPlus.git - cd PcapPlusPlus - mkdir ${{github.workspace}}/local - - - name: PcapPlusPlus config (linux) - if: matrix.os == 'ubuntu-latest' - working-directory: ${{github.workspace}}/PcapPlusPlus - run: | - ./configure-linux.sh --install-dir ${{github.workspace}}/local - - - name: PcapPlusPlus config (macos) - if: matrix.os == 'macos-latest' - working-directory: ${{github.workspace}}/PcapPlusPlus - run: | - ./configure-mac_os_x.sh --install-dir ${{github.workspace}}/local - - - name: PcapPlusPlus install - working-directory: ${{github.workspace}}/PcapPlusPlus - run: | - make libs - make install -j 2 - - - name: Configure CMake - # Use a bash shell so we can use the same syntax for environment variable - # access regardless of the host operating system - shell: bash - working-directory: ${{github.workspace}}/build - # Note the current convention is to use the -S and -B options here to specify source - # and build directories, but this is only available with CMake 3.13 and higher. 
- # The CMake binaries on the Github Actions machines are (as of this writing) 3.12 - run: PKG_CONFIG_PATH=${{github.workspace}}/local/lib/pkgconfig cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE - - - name: Build - working-directory: ${{github.workspace}}/build - shell: bash - # Execute the build. You can specify a specific target with "--target " - run: cmake --build . --config $BUILD_TYPE -- -j 2 - - - name: Test - working-directory: ${{github.workspace}}/build - shell: bash - # Execute tests defined by the CMake configuration. - # See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail - run: ctest -C $BUILD_TYPE diff --git a/.gitignore b/.gitignore index 061f69f56..82dd8837c 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,5 @@ cmake-build-*/ docs/html-documentation-generated* integration_tests/external golang/pkg/client/version.go +docs/internals/html +appimage/*.AppImage \ No newline at end of file diff --git a/3rd/CMakeLists.txt b/3rd/CMakeLists.txt index f32ba2330..50bd4859f 100644 --- a/3rd/CMakeLists.txt +++ b/3rd/CMakeLists.txt @@ -4,3 +4,4 @@ message(STATUS "3rd party libraries") add_subdirectory(datasketches) add_subdirectory(rng) add_subdirectory(timer) +add_subdirectory(libmaxminddb) diff --git a/3rd/libmaxminddb/CMakeLists.txt b/3rd/libmaxminddb/CMakeLists.txt new file mode 100644 index 000000000..ba4326e01 --- /dev/null +++ b/3rd/libmaxminddb/CMakeLists.txt @@ -0,0 +1,84 @@ +cmake_minimum_required (VERSION 3.9) +project(maxminddb + LANGUAGES C + VERSION 1.5.2 +) +set(MAXMINDDB_SOVERSION 0.0.7) + +option(BUILD_SHARED_LIBS "Build shared libraries (.dll/.so) instead of static ones (.lib/.a)" OFF) +option(BUILD_TESTING "Build test programs" ON) + +include(CheckTypeSize) +check_type_size("unsigned __int128" UINT128) +check_type_size("unsigned int __attribute__((mode(TI)))" UINT128_USING_MODE) +if(HAVE_UINT128) + set(MMDB_UINT128_USING_MODE 0) + set(MMDB_UINT128_IS_BYTE_ARRAY 0) +elseif(HAVE_UINT128_USING_MODE) + 
    set(MMDB_UINT128_USING_MODE 1)
+    set(MMDB_UINT128_IS_BYTE_ARRAY 0)
+else()
+    set(MMDB_UINT128_USING_MODE 0)
+    set(MMDB_UINT128_IS_BYTE_ARRAY 1)
+endif()
+
+include (TestBigEndian)
+TEST_BIG_ENDIAN(IS_BIG_ENDIAN)
+
+if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+    set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+endif()
+
+configure_file(${PROJECT_SOURCE_DIR}/include/maxminddb_config.h.cmake.in
+        ${PROJECT_SOURCE_DIR}/include/maxminddb_config.h)
+
+add_library(maxminddb
+        src/maxminddb.c
+        src/data-pool.c
+)
+add_library(maxminddb::maxminddb ALIAS maxminddb)
+target_compile_options(maxminddb PRIVATE -Wno-pedantic)
+
+set_target_properties(maxminddb PROPERTIES VERSION ${MAXMINDDB_SOVERSION})
+
+target_compile_definitions(maxminddb PUBLIC PACKAGE_VERSION="${PROJECT_VERSION}")
+
+if(NOT IS_BIG_ENDIAN)
+    target_compile_definitions(maxminddb PRIVATE MMDB_LITTLE_ENDIAN=1)
+endif()
+
+if(MSVC)
+    target_compile_definitions(maxminddb PRIVATE _CRT_SECURE_NO_WARNINGS)
+endif()
+
+if(WIN32)
+    target_link_libraries(maxminddb ws2_32)
+endif()
+
+set(CMAKE_SHARED_LIBRARY_PREFIX lib)
+set(CMAKE_STATIC_LIBRARY_PREFIX lib)
+
+target_include_directories(maxminddb
+    PUBLIC
+        $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+        $<INSTALL_INTERFACE:$<INSTALL_PREFIX>/include>
+)
+
+set(MAXMINDB_HEADERS
+    include/maxminddb.h
+    include/maxminddb_config.h
+)
+set_target_properties(maxminddb PROPERTIES PUBLIC_HEADER "${MAXMINDB_HEADERS}")
+
+install(TARGETS maxminddb
+    EXPORT maxminddb
+    ARCHIVE DESTINATION lib
+    PUBLIC_HEADER DESTINATION include/
+)
+
+# This is required to work with FetchContent
+install(EXPORT maxminddb
+    FILE maxminddb-config.cmake
+    NAMESPACE maxminddb::
+    DESTINATION lib/cmake/maxminddb)
+
diff --git a/3rd/libmaxminddb/LICENSE b/3rd/libmaxminddb/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/3rd/libmaxminddb/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/3rd/libmaxminddb/README.md b/3rd/libmaxminddb/README.md new file mode 100644 index 000000000..0a85473b5 --- /dev/null +++ b/3rd/libmaxminddb/README.md @@ -0,0 +1,133 @@ +# About + +The libmaxminddb library provides a C library for reading MaxMind DB files, +including the GeoIP2 databases from MaxMind. This is a custom binary format +designed to facilitate fast lookups of IP addresses while allowing for great +flexibility in the type of data associated with an address. + +The MaxMind DB format is an open format. The spec is available at +http://maxmind.github.io/MaxMind-DB/. This spec is licensed under the Creative +Commons Attribution-ShareAlike 3.0 Unported License. + +See http://dev.maxmind.com/ for more details about MaxMind's GeoIP2 products. 
+
+# License
+
+This library is licensed under the Apache License, Version 2.
+
+# Installation
+
+## From a Named Release Tarball
+
+**NOTE:** These instructions are for installation from the _named_ `.tar.gz`
+tarballs on the [Releases](https://github.com/maxmind/libmaxminddb/releases)
+page (e.g. `libmaxminddb-*.tar.gz`).
+
+This code is known to work with GCC 4.4+ and clang 3.2+. It should also work
+on other compilers that support C99, POSIX.1-2001, and the `-fms-extensions`
+flag (or equivalent). The latter is needed to allow an anonymous union in a
+structure.
+
+To install this code, run the following commands:
+
+    $ ./configure
+    $ make
+    $ make check
+    $ sudo make install
+    $ sudo ldconfig
+
+You can skip the `make check` step but it's always good to know that tests are
+passing on your platform.
+
+The `configure` script takes the standard options to set where files are
+installed such as `--prefix`, etc. See `./configure --help` for details.
+
+If after installing, you receive an error that `libmaxminddb.so.0` is missing
+you may need to add the `lib` directory in your `prefix` to your library path.
+On most Linux distributions when using the default prefix (`/usr/local`), you
+can do this by running the following commands:
+
+    $ sudo sh -c "echo /usr/local/lib >> /etc/ld.so.conf.d/local.conf"
+    $ ldconfig
+
+## From a GitHub "Source Code" Archive / Git Repo Clone (Achtung!)
+
+**NOTE:** These instructions are for installation from the GitHub "Source
+Code" archives also available on the
+[Releases](https://github.com/maxmind/libmaxminddb/releases) page (e.g.
+`X.Y.Z.zip` or `X.Y.Z.tar.gz`), as well as installation directly from a clone
+of the [Git repo](https://github.com/maxmind/libmaxminddb). Installation from
+these sources is possible but will present challenges to users not
+comfortable with manual dependency resolution.
+
+You will need `automake`, `autoconf`, and `libtool` installed
+in addition to `make` and a compiler.
+ +You can clone this repository and build it by running: + + $ git clone --recursive https://github.com/maxmind/libmaxminddb + +After cloning, run `./bootstrap` from the `libmaxminddb` directory and then +follow the instructions for installing from a named release tarball as described above. + +## Using CMake + +We provide a CMake build script. This is primarily targeted at Windows users, +but it can be used in other circumstances where the Autotools script does not +work. + + $ cmake --build . + $ ctest -V . + $ cmake --build . --target install + +## On Ubuntu via PPA + +MaxMind provides a PPA for recent version of Ubuntu. To add the PPA to your +APT sources, run: + + $ sudo add-apt-repository ppa:maxmind/ppa + +Then install the packages by running: + + $ sudo apt update + $ sudo apt install libmaxminddb0 libmaxminddb-dev mmdb-bin + +## On OS X via Homebrew + +If you are on OS X and you have homebrew (see http://brew.sh/) you can install +libmaxminddb via brew. + + $ brew install libmaxminddb + +# Bug Reports + +Please report bugs by filing an issue with our GitHub issue tracker at +https://github.com/maxmind/libmaxminddb/issues + +# Dev Tools + +We have a few development tools under the `dev-bin` directory to make +development easier. These are written in Perl or shell. They are: + +* `uncrustify-all.sh` - This runs `uncrustify` on all the code. Please run + this before submitting patches. +* `valgrind-all.pl` - This runs Valgrind on the tests and `mmdblookup` to + check for memory leaks. + +# Creating a Release Tarball + +Use `make safedist` to check the resulting tarball. + +# Copyright and License + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/3rd/libmaxminddb/include/maxminddb.h b/3rd/libmaxminddb/include/maxminddb.h new file mode 100644 index 000000000..0e4c096de --- /dev/null +++ b/3rd/libmaxminddb/include/maxminddb.h @@ -0,0 +1,258 @@ +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef MAXMINDDB_H +#define MAXMINDDB_H + +/* Request POSIX.1-2008. However, we want to remain compatible with + * POSIX.1-2001 (since we have been historically and see no reason to drop + * compatibility). By requesting POSIX.1-2008, we can conditionally use + * features provided by that standard if the implementation provides it. We can + * check for what the implementation provides by checking the _POSIX_VERSION + * macro after including unistd.h. If a feature is in POSIX.1-2008 but not + * POSIX.1-2001, check that macro before using the feature (or check for the + * feature directly if possible). 
 */
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200809L
+#endif
+
+#include "maxminddb_config.h"
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+/* libmaxminddb package version from configure */
+#define PACKAGE_VERSION "1.5.2"
+
+typedef ADDRESS_FAMILY sa_family_t;
+
+#if defined(_MSC_VER)
+/* MSVC doesn't define signed size_t, copy it from configure */
+#define ssize_t SSIZE_T
+
+/* MSVC doesn't support restricted pointers */
+#define restrict
+#endif
+#else
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#endif
+
+#define MMDB_DATA_TYPE_EXTENDED (0)
+#define MMDB_DATA_TYPE_POINTER (1)
+#define MMDB_DATA_TYPE_UTF8_STRING (2)
+#define MMDB_DATA_TYPE_DOUBLE (3)
+#define MMDB_DATA_TYPE_BYTES (4)
+#define MMDB_DATA_TYPE_UINT16 (5)
+#define MMDB_DATA_TYPE_UINT32 (6)
+#define MMDB_DATA_TYPE_MAP (7)
+#define MMDB_DATA_TYPE_INT32 (8)
+#define MMDB_DATA_TYPE_UINT64 (9)
+#define MMDB_DATA_TYPE_UINT128 (10)
+#define MMDB_DATA_TYPE_ARRAY (11)
+#define MMDB_DATA_TYPE_CONTAINER (12)
+#define MMDB_DATA_TYPE_END_MARKER (13)
+#define MMDB_DATA_TYPE_BOOLEAN (14)
+#define MMDB_DATA_TYPE_FLOAT (15)
+
+#define MMDB_RECORD_TYPE_SEARCH_NODE (0)
+#define MMDB_RECORD_TYPE_EMPTY (1)
+#define MMDB_RECORD_TYPE_DATA (2)
+#define MMDB_RECORD_TYPE_INVALID (3)
+
+/* flags for open */
+#define MMDB_MODE_MMAP (1)
+#define MMDB_MODE_MASK (7)
+
+/* error codes */
+#define MMDB_SUCCESS (0)
+#define MMDB_FILE_OPEN_ERROR (1)
+#define MMDB_CORRUPT_SEARCH_TREE_ERROR (2)
+#define MMDB_INVALID_METADATA_ERROR (3)
+#define MMDB_IO_ERROR (4)
+#define MMDB_OUT_OF_MEMORY_ERROR (5)
+#define MMDB_UNKNOWN_DATABASE_FORMAT_ERROR (6)
+#define MMDB_INVALID_DATA_ERROR (7)
+#define MMDB_INVALID_LOOKUP_PATH_ERROR (8)
+#define MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR (9)
+#define MMDB_INVALID_NODE_NUMBER_ERROR (10)
+#define MMDB_IPV6_LOOKUP_IN_IPV4_DATABASE_ERROR (11)
+
+#if !(MMDB_UINT128_IS_BYTE_ARRAY)
+#if MMDB_UINT128_USING_MODE
+typedef unsigned int mmdb_uint128_t __attribute__((__mode__(TI)));
+#else
+typedef unsigned __int128 mmdb_uint128_t;
+#endif
+#endif
+
+/* This is a pointer into the data section for a given IP address lookup */
+typedef struct MMDB_entry_s {
+    const struct MMDB_s *mmdb;
+    uint32_t offset;
+} MMDB_entry_s;
+
+typedef struct MMDB_lookup_result_s {
+    bool found_entry;
+    MMDB_entry_s entry;
+    uint16_t netmask;
+} MMDB_lookup_result_s;
+
+typedef struct MMDB_entry_data_s {
+    bool has_data;
+    union {
+        uint32_t pointer;
+        const char *utf8_string;
+        double double_value;
+        const uint8_t *bytes;
+        uint16_t uint16;
+        uint32_t uint32;
+        int32_t int32;
+        uint64_t uint64;
+#if MMDB_UINT128_IS_BYTE_ARRAY
+        uint8_t uint128[16];
+#else
+        mmdb_uint128_t uint128;
+#endif
+        bool boolean;
+        float float_value;
+    };
+    /* This is a 0 if a given entry cannot be found. This can only happen
+     * when a call to MMDB_(v)get_value() asks for hash keys or array
+     * indices that don't exist. */
+    uint32_t offset;
+    /* This is the next entry in the data section, but it's really only
+     * relevant for entries that are part of a larger map or array
+     * struct. There's no good reason for an end user to look at this
+     * directly. */
+    uint32_t offset_to_next;
+    /* This is only valid for strings, utf8_strings or binary data */
+    uint32_t data_size;
+    /* This is an MMDB_DATA_TYPE_* constant */
+    uint32_t type;
+} MMDB_entry_data_s;
+
+/* This is the return type when someone asks for all the entry data in a map or
+ * array */
+typedef struct MMDB_entry_data_list_s {
+    MMDB_entry_data_s entry_data;
+    struct MMDB_entry_data_list_s *next;
+    void *pool;
+} MMDB_entry_data_list_s;
+
+typedef struct MMDB_description_s {
+    const char *language;
+    const char *description;
+} MMDB_description_s;
+
+/* WARNING: do not add new fields to this struct without bumping the SONAME.
+ * The struct is allocated by the users of this library and increasing the + * size will cause existing users to allocate too little space when the shared + * library is upgraded */ +typedef struct MMDB_metadata_s { + uint32_t node_count; + uint16_t record_size; + uint16_t ip_version; + const char *database_type; + struct { + size_t count; + const char **names; + } languages; + uint16_t binary_format_major_version; + uint16_t binary_format_minor_version; + uint64_t build_epoch; + struct { + size_t count; + MMDB_description_s **descriptions; + } description; + /* See above warning before adding fields */ +} MMDB_metadata_s; + +/* WARNING: do not add new fields to this struct without bumping the SONAME. + * The struct is allocated by the users of this library and increasing the + * size will cause existing users to allocate too little space when the shared + * library is upgraded */ +typedef struct MMDB_ipv4_start_node_s { + uint16_t netmask; + uint32_t node_value; + /* See above warning before adding fields */ +} MMDB_ipv4_start_node_s; + +/* WARNING: do not add new fields to this struct without bumping the SONAME. 
+ * The struct is allocated by the users of this library and increasing the + * size will cause existing users to allocate too little space when the shared + * library is upgraded */ +typedef struct MMDB_s { + uint32_t flags; + const char *filename; + ssize_t file_size; + const uint8_t *file_content; + const uint8_t *data_section; + uint32_t data_section_size; + const uint8_t *metadata_section; + uint32_t metadata_section_size; + uint16_t full_record_byte_size; + uint16_t depth; + MMDB_ipv4_start_node_s ipv4_start_node; + MMDB_metadata_s metadata; + /* See above warning before adding fields */ +} MMDB_s; + +typedef struct MMDB_search_node_s { + uint64_t left_record; + uint64_t right_record; + uint8_t left_record_type; + uint8_t right_record_type; + MMDB_entry_s left_record_entry; + MMDB_entry_s right_record_entry; +} MMDB_search_node_s; + +extern int +MMDB_open(const char *const filename, uint32_t flags, MMDB_s *const mmdb); +extern MMDB_lookup_result_s MMDB_lookup_string(const MMDB_s *const mmdb, + const char *const ipstr, + int *const gai_error, + int *const mmdb_error); +extern MMDB_lookup_result_s +MMDB_lookup_sockaddr(const MMDB_s *const mmdb, + const struct sockaddr *const sockaddr, + int *const mmdb_error); +extern int MMDB_read_node(const MMDB_s *const mmdb, + uint32_t node_number, + MMDB_search_node_s *const node); +extern int MMDB_get_value(MMDB_entry_s *const start, + MMDB_entry_data_s *const entry_data, + ...); +extern int MMDB_vget_value(MMDB_entry_s *const start, + MMDB_entry_data_s *const entry_data, + va_list va_path); +extern int MMDB_aget_value(MMDB_entry_s *const start, + MMDB_entry_data_s *const entry_data, + const char *const *const path); +extern int MMDB_get_metadata_as_entry_data_list( + const MMDB_s *const mmdb, MMDB_entry_data_list_s **const entry_data_list); +extern int +MMDB_get_entry_data_list(MMDB_entry_s *start, + MMDB_entry_data_list_s **const entry_data_list); +extern void +MMDB_free_entry_data_list(MMDB_entry_data_list_s *const 
entry_data_list); +extern void MMDB_close(MMDB_s *const mmdb); +extern const char *MMDB_lib_version(void); +extern int +MMDB_dump_entry_data_list(FILE *const stream, + MMDB_entry_data_list_s *const entry_data_list, + int indent); +extern const char *MMDB_strerror(int error_code); + +#endif /* MAXMINDDB_H */ + +#ifdef __cplusplus +} +#endif diff --git a/3rd/libmaxminddb/include/maxminddb_config.h b/3rd/libmaxminddb/include/maxminddb_config.h new file mode 100644 index 000000000..28eaf3c57 --- /dev/null +++ b/3rd/libmaxminddb/include/maxminddb_config.h @@ -0,0 +1,14 @@ +#ifndef MAXMINDDB_CONFIG_H +#define MAXMINDDB_CONFIG_H + +#ifndef MMDB_UINT128_USING_MODE +/* Define as 1 if we use unsigned int __atribute__ ((__mode__(TI))) for uint128 values */ +/* #undef MMDB_UINT128_USING_MODE */ +#endif + +#ifndef MMDB_UINT128_IS_BYTE_ARRAY +/* Define as 1 if we don't have an unsigned __int128 type */ +/* #undef MMDB_UINT128_IS_BYTE_ARRAY */ +#endif + +#endif /* MAXMINDDB_CONFIG_H */ diff --git a/3rd/libmaxminddb/include/maxminddb_config.h.cmake.in b/3rd/libmaxminddb/include/maxminddb_config.h.cmake.in new file mode 100644 index 000000000..8b1977f86 --- /dev/null +++ b/3rd/libmaxminddb/include/maxminddb_config.h.cmake.in @@ -0,0 +1,14 @@ +#ifndef MAXMINDDB_CONFIG_H +#define MAXMINDDB_CONFIG_H + +#ifndef MMDB_UINT128_USING_MODE +/* Define as 1 if we use unsigned int __atribute__ ((__mode__(TI))) for uint128 values */ +#cmakedefine MMDB_UINT128_USING_MODE @MMDB_UINT128_USING_MODE@ +#endif + +#ifndef MMDB_UINT128_IS_BYTE_ARRAY +/* Define as 1 if we don't have an unsigned __int128 type */ +#cmakedefine MMDB_UINT128_IS_BYTE_ARRAY @MMDB_UINT128_IS_BYTE_ARRAY@ +#endif + +#endif /* MAXMINDDB_CONFIG_H */ diff --git a/3rd/libmaxminddb/include/maxminddb_config.h.in b/3rd/libmaxminddb/include/maxminddb_config.h.in new file mode 100644 index 000000000..314d559d3 --- /dev/null +++ b/3rd/libmaxminddb/include/maxminddb_config.h.in @@ -0,0 +1,14 @@ +#ifndef MAXMINDDB_CONFIG_H +#define 
MAXMINDDB_CONFIG_H + +#ifndef MMDB_UINT128_USING_MODE +/* Define as 1 if we use unsigned int __atribute__ ((__mode__(TI))) for uint128 values */ +#define MMDB_UINT128_USING_MODE 0 +#endif + +#ifndef MMDB_UINT128_IS_BYTE_ARRAY +/* Define as 1 if we don't have an unsigned __int128 type */ +#undef MMDB_UINT128_IS_BYTE_ARRAY +#endif + +#endif /* MAXMINDDB_CONFIG_H */ diff --git a/3rd/libmaxminddb/src/Makefile.am b/3rd/libmaxminddb/src/Makefile.am new file mode 100644 index 000000000..6d57acaae --- /dev/null +++ b/3rd/libmaxminddb/src/Makefile.am @@ -0,0 +1,25 @@ +include $(top_srcdir)/common.mk + +lib_LTLIBRARIES = libmaxminddb.la + +libmaxminddb_la_SOURCES = maxminddb.c maxminddb-compat-util.h \ + data-pool.c data-pool.h +libmaxminddb_la_LDFLAGS = -version-info 0:7:0 -export-symbols-regex '^MMDB_.*' +include_HEADERS = $(top_srcdir)/include/maxminddb.h + +pkgconfig_DATA = libmaxminddb.pc + +TESTS = test-data-pool + +check_PROGRAMS = test-data-pool + +test_data_pool_SOURCES = data-pool.c data-pool.h +test_data_pool_CPPFLAGS = $(AM_CPPFLAGS) -I$(top_srcdir)/t -DTEST_DATA_POOL +test_data_pool_LDADD = $(top_srcdir)/t/libmmdbtest.la \ + $(top_srcdir)/t/libtap/libtap.a + +$(top_srcdir)/t/libmmdbtest.la: + $(MAKE) -C $(top_srcdir)/t libmmdbtest.la + +$(top_srcdir)/t/libtap/libtap.a: + $(MAKE) -C $(top_srcdir)/t/libtap libtap.a diff --git a/3rd/libmaxminddb/src/Makefile.in b/3rd/libmaxminddb/src/Makefile.in new file mode 100644 index 000000000..fa507fb61 --- /dev/null +++ b/3rd/libmaxminddb/src/Makefile.in @@ -0,0 +1,887 @@ +# Makefile.in generated by automake 1.16.2 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2020 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + + + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd 
+install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +TESTS = test-data-pool$(EXEEXT) +check_PROGRAMS = test-data-pool$(EXEEXT) +subdir = src +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(include_HEADERS) \ + $(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h \ + $(top_builddir)/include/maxminddb_config.h +CONFIG_CLEAN_FILES = libmaxminddb.pc +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pkgconfigdir)" \ + "$(DESTDIR)$(includedir)" +LTLIBRARIES = $(lib_LTLIBRARIES) +libmaxminddb_la_LIBADD = +am_libmaxminddb_la_OBJECTS = maxminddb.lo data-pool.lo +libmaxminddb_la_OBJECTS = $(am_libmaxminddb_la_OBJECTS) +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +libmaxminddb_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(AM_CFLAGS) $(CFLAGS) $(libmaxminddb_la_LDFLAGS) $(LDFLAGS) \ + -o $@ +am_test_data_pool_OBJECTS = test_data_pool-data-pool.$(OBJEXT) +test_data_pool_OBJECTS = $(am_test_data_pool_OBJECTS) +test_data_pool_DEPENDENCIES = $(top_srcdir)/t/libmmdbtest.la \ + $(top_srcdir)/t/libtap/libtap.a +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/include +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = ./$(DEPDIR)/data-pool.Plo \ + ./$(DEPDIR)/maxminddb.Plo \ + ./$(DEPDIR)/test_data_pool-data-pool.Po +am__mv = mv -f +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_@AM_V@) +am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) +am__v_CC_0 = @echo " CC " $@; +am__v_CC_1 = +CCLD = $(CC) +LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC 
$(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_@AM_V@) +am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) +am__v_CCLD_0 = @echo " CCLD " $@; +am__v_CCLD_1 = +SOURCES = $(libmaxminddb_la_SOURCES) $(test_data_pool_SOURCES) +DIST_SOURCES = $(libmaxminddb_la_SOURCES) $(test_data_pool_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +DATA = $(pkgconfig_DATA) +HEADERS = $(include_HEADERS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +am__tty_colors_dummy = \ + mgn= red= grn= lgn= blu= brg= std=; \ + am__color_tests=no +am__tty_colors = { \ + $(am__tty_colors_dummy); \ + if test "X$(AM_COLOR_TESTS)" = Xno; then \ + am__color_tests=no; \ + elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ + am__color_tests=yes; \ + elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ + am__color_tests=yes; \ + fi; \ + if test $$am__color_tests = yes; then \ + red=''; \ + grn=''; \ + lgn=''; \ + blu=''; \ + mgn=''; \ + brg=''; \ + std=''; \ + fi; \ +} +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/libmaxminddb.pc.in \ + $(top_srcdir)/common.mk $(top_srcdir)/depcomp +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = 
@OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +pkgconfigdir = @pkgconfigdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +@DEBUG_FALSE@AM_CFLAGS = -O2 -g +@DEBUG_TRUE@AM_CFLAGS = -O0 -g -Wall -Wextra +AM_CPPFLAGS 
= -I$(top_srcdir)/include +lib_LTLIBRARIES = libmaxminddb.la +libmaxminddb_la_SOURCES = maxminddb.c maxminddb-compat-util.h \ + data-pool.c data-pool.h + +libmaxminddb_la_LDFLAGS = -version-info 0:7:0 -export-symbols-regex '^MMDB_.*' +include_HEADERS = $(top_srcdir)/include/maxminddb.h +pkgconfig_DATA = libmaxminddb.pc +test_data_pool_SOURCES = data-pool.c data-pool.h +test_data_pool_CPPFLAGS = $(AM_CPPFLAGS) -I$(top_srcdir)/t -DTEST_DATA_POOL +test_data_pool_LDADD = $(top_srcdir)/t/libmmdbtest.la \ + $(top_srcdir)/t/libtap/libtap.a + +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(top_srcdir)/common.mk $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; +$(top_srcdir)/common.mk $(am__empty): + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +libmaxminddb.pc: $(top_builddir)/config.status $(srcdir)/libmaxminddb.pc.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ + +clean-checkPROGRAMS: + @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list + +install-libLTLIBRARIES: $(lib_LTLIBRARIES) + @$(NORMAL_INSTALL) + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ + } + +uninstall-libLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) 
--mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ + done + +clean-libLTLIBRARIES: + -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) + @list='$(lib_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } + +libmaxminddb.la: $(libmaxminddb_la_OBJECTS) $(libmaxminddb_la_DEPENDENCIES) $(EXTRA_libmaxminddb_la_DEPENDENCIES) + $(AM_V_CCLD)$(libmaxminddb_la_LINK) -rpath $(libdir) $(libmaxminddb_la_OBJECTS) $(libmaxminddb_la_LIBADD) $(LIBS) + +test-data-pool$(EXEEXT): $(test_data_pool_OBJECTS) $(test_data_pool_DEPENDENCIES) $(EXTRA_test_data_pool_DEPENDENCIES) + @rm -f test-data-pool$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(test_data_pool_OBJECTS) $(test_data_pool_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/data-pool.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/maxminddb.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_data_pool-data-pool.Po@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) + +.c.o: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< + +.c.obj: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ 
`$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< + +test_data_pool-data-pool.o: data-pool.c +@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_data_pool_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT test_data_pool-data-pool.o -MD -MP -MF $(DEPDIR)/test_data_pool-data-pool.Tpo -c -o test_data_pool-data-pool.o `test -f 'data-pool.c' || echo '$(srcdir)/'`data-pool.c +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/test_data_pool-data-pool.Tpo $(DEPDIR)/test_data_pool-data-pool.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='data-pool.c' object='test_data_pool-data-pool.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_data_pool_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o test_data_pool-data-pool.o `test -f 'data-pool.c' || echo '$(srcdir)/'`data-pool.c + +test_data_pool-data-pool.obj: data-pool.c +@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_data_pool_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT test_data_pool-data-pool.obj -MD -MP -MF 
$(DEPDIR)/test_data_pool-data-pool.Tpo -c -o test_data_pool-data-pool.obj `if test -f 'data-pool.c'; then $(CYGPATH_W) 'data-pool.c'; else $(CYGPATH_W) '$(srcdir)/data-pool.c'; fi` +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/test_data_pool-data-pool.Tpo $(DEPDIR)/test_data_pool-data-pool.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='data-pool.c' object='test_data_pool-data-pool.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_data_pool_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o test_data_pool-data-pool.obj `if test -f 'data-pool.c'; then $(CYGPATH_W) 'data-pool.c'; else $(CYGPATH_W) '$(srcdir)/data-pool.c'; fi` + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-pkgconfigDATA: $(pkgconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pkgconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ + done + +uninstall-pkgconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir) +install-includeHEADERS: $(include_HEADERS) + @$(NORMAL_INSTALL) + @list='$(include_HEADERS)'; test -n "$(includedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \ + fi; \ + for p 
in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(includedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(includedir)" || exit $$?; \ + done + +uninstall-includeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(include_HEADERS)'; test -n "$(includedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir) + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +check-TESTS: $(TESTS) + @failed=0; all=0; xfail=0; xpass=0; skip=0; \ + srcdir=$(srcdir); export srcdir; \ + list=' $(TESTS) '; \ + 
$(am__tty_colors); \ + if test -n "$$list"; then \ + for tst in $$list; do \ + if test -f ./$$tst; then dir=./; \ + elif test -f $$tst; then dir=; \ + else dir="$(srcdir)/"; fi; \ + if $(TESTS_ENVIRONMENT) $${dir}$$tst $(AM_TESTS_FD_REDIRECT); then \ + all=`expr $$all + 1`; \ + case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$tst[\ \ ]*) \ + xpass=`expr $$xpass + 1`; \ + failed=`expr $$failed + 1`; \ + col=$$red; res=XPASS; \ + ;; \ + *) \ + col=$$grn; res=PASS; \ + ;; \ + esac; \ + elif test $$? -ne 77; then \ + all=`expr $$all + 1`; \ + case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$tst[\ \ ]*) \ + xfail=`expr $$xfail + 1`; \ + col=$$lgn; res=XFAIL; \ + ;; \ + *) \ + failed=`expr $$failed + 1`; \ + col=$$red; res=FAIL; \ + ;; \ + esac; \ + else \ + skip=`expr $$skip + 1`; \ + col=$$blu; res=SKIP; \ + fi; \ + echo "$${col}$$res$${std}: $$tst"; \ + done; \ + if test "$$all" -eq 1; then \ + tests="test"; \ + All=""; \ + else \ + tests="tests"; \ + All="All "; \ + fi; \ + if test "$$failed" -eq 0; then \ + if test "$$xfail" -eq 0; then \ + banner="$$All$$all $$tests passed"; \ + else \ + if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ + banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ + fi; \ + else \ + if test "$$xpass" -eq 0; then \ + banner="$$failed of $$all $$tests failed"; \ + else \ + if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ + banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ + fi; \ + fi; \ + dashes="$$banner"; \ + skipped=""; \ + if test "$$skip" -ne 0; then \ + if test "$$skip" -eq 1; then \ + skipped="($$skip test was not run)"; \ + else \ + skipped="($$skip tests were not run)"; \ + fi; \ + test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ + dashes="$$skipped"; \ + fi; \ + report=""; \ + if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ + report="Please report to $(PACKAGE_BUGREPORT)"; \ + test `echo 
"$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ + dashes="$$report"; \ + fi; \ + dashes=`echo "$$dashes" | sed s/./=/g`; \ + if test "$$failed" -eq 0; then \ + col="$$grn"; \ + else \ + col="$$red"; \ + fi; \ + echo "$${col}$$dashes$${std}"; \ + echo "$${col}$$banner$${std}"; \ + test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ + test -z "$$report" || echo "$${col}$$report$${std}"; \ + echo "$${col}$$dashes$${std}"; \ + test "$$failed" -eq 0; \ + else :; fi + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) + $(MAKE) $(AM_MAKEFLAGS) check-TESTS +check: check-am +all-am: Makefile $(LTLIBRARIES) $(DATA) $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(pkgconfigdir)" "$(DESTDIR)$(includedir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-checkPROGRAMS clean-generic clean-libLTLIBRARIES \ + clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f ./$(DEPDIR)/data-pool.Plo + -rm -f ./$(DEPDIR)/maxminddb.Plo + -rm -f ./$(DEPDIR)/test_data_pool-data-pool.Po + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-includeHEADERS install-pkgconfigDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-libLTLIBRARIES + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f ./$(DEPDIR)/data-pool.Plo + -rm -f ./$(DEPDIR)/maxminddb.Plo + -rm -f ./$(DEPDIR)/test_data_pool-data-pool.Po + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-includeHEADERS uninstall-libLTLIBRARIES \ + uninstall-pkgconfigDATA + +.MAKE: check-am install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-TESTS \ + check-am clean clean-checkPROGRAMS clean-generic \ + clean-libLTLIBRARIES clean-libtool cscopelist-am ctags \ + ctags-am distclean distclean-compile distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am \ + install-includeHEADERS install-info install-info-am \ + install-libLTLIBRARIES install-man install-pdf install-pdf-am \ + install-pkgconfigDATA 
install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags tags-am uninstall uninstall-am uninstall-includeHEADERS \ + uninstall-libLTLIBRARIES uninstall-pkgconfigDATA + +.PRECIOUS: Makefile + + +$(top_srcdir)/t/libmmdbtest.la: + $(MAKE) -C $(top_srcdir)/t libmmdbtest.la + +$(top_srcdir)/t/libtap/libtap.a: + $(MAKE) -C $(top_srcdir)/t/libtap libtap.a + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/3rd/libmaxminddb/src/data-pool.c b/3rd/libmaxminddb/src/data-pool.c new file mode 100644 index 000000000..7b3c2a596 --- /dev/null +++ b/3rd/libmaxminddb/src/data-pool.c @@ -0,0 +1,169 @@ +#include "data-pool.h" +#include "maxminddb.h" + +#include +#include +#include + +static bool can_multiply(size_t const, size_t const, size_t const); + +// Allocate an MMDB_data_pool_s. It initially has space for size +// MMDB_entry_data_list_s structs. +MMDB_data_pool_s *data_pool_new(size_t const size) { + MMDB_data_pool_s *const pool = calloc(1, sizeof(MMDB_data_pool_s)); + if (!pool) { + return NULL; + } + + if (size == 0 || + !can_multiply(SIZE_MAX, size, sizeof(MMDB_entry_data_list_s))) { + data_pool_destroy(pool); + return NULL; + } + pool->size = size; + pool->blocks[0] = calloc(pool->size, sizeof(MMDB_entry_data_list_s)); + if (!pool->blocks[0]) { + data_pool_destroy(pool); + return NULL; + } + pool->blocks[0]->pool = pool; + + pool->sizes[0] = size; + + pool->block = pool->blocks[0]; + + return pool; +} + +// Determine if we can multiply m*n. We can do this if the result will be below +// the given max. max will typically be SIZE_MAX. +// +// We want to know if we'll wrap around. 
+static bool can_multiply(size_t const max, size_t const m, size_t const n) { + if (m == 0) { + return false; + } + + return n <= max / m; +} + +// Clean up the data pool. +void data_pool_destroy(MMDB_data_pool_s *const pool) { + if (!pool) { + return; + } + + for (size_t i = 0; i <= pool->index; i++) { + free(pool->blocks[i]); + } + + free(pool); +} + +// Claim a new struct from the pool. Doing this may cause the pool's size to +// grow. +MMDB_entry_data_list_s *data_pool_alloc(MMDB_data_pool_s *const pool) { + if (!pool) { + return NULL; + } + + if (pool->used < pool->size) { + MMDB_entry_data_list_s *const element = pool->block + pool->used; + pool->used++; + return element; + } + + // Take it from a new block of memory. + + size_t const new_index = pool->index + 1; + if (new_index == DATA_POOL_NUM_BLOCKS) { + // See the comment about not growing this on DATA_POOL_NUM_BLOCKS. + return NULL; + } + + if (!can_multiply(SIZE_MAX, pool->size, 2)) { + return NULL; + } + size_t const new_size = pool->size * 2; + + if (!can_multiply(SIZE_MAX, new_size, sizeof(MMDB_entry_data_list_s))) { + return NULL; + } + pool->blocks[new_index] = calloc(new_size, sizeof(MMDB_entry_data_list_s)); + if (!pool->blocks[new_index]) { + return NULL; + } + + // We don't need to set this, but it's useful for introspection in tests. + pool->blocks[new_index]->pool = pool; + + pool->index = new_index; + pool->block = pool->blocks[pool->index]; + + pool->size = new_size; + pool->sizes[pool->index] = pool->size; + + MMDB_entry_data_list_s *const element = pool->block; + pool->used = 1; + return element; +} + +// Turn the structs in the array-like pool into a linked list. +// +// Before calling this function, the list isn't linked up. 
+MMDB_entry_data_list_s *data_pool_to_list(MMDB_data_pool_s *const pool) { + if (!pool) { + return NULL; + } + + if (pool->index == 0 && pool->used == 0) { + return NULL; + } + + for (size_t i = 0; i <= pool->index; i++) { + MMDB_entry_data_list_s *const block = pool->blocks[i]; + + size_t size = pool->sizes[i]; + if (i == pool->index) { + size = pool->used; + } + + for (size_t j = 0; j < size - 1; j++) { + MMDB_entry_data_list_s *const cur = block + j; + cur->next = block + j + 1; + } + + if (i < pool->index) { + MMDB_entry_data_list_s *const last = block + size - 1; + last->next = pool->blocks[i + 1]; + } + } + + return pool->blocks[0]; +} + +#ifdef TEST_DATA_POOL + +#include +#include + +static void test_can_multiply(void); + +int main(void) { + plan(NO_PLAN); + test_can_multiply(); + done_testing(); +} + +static void test_can_multiply(void) { + { ok(can_multiply(SIZE_MAX, 1, SIZE_MAX), "1*SIZE_MAX is ok"); } + + { ok(!can_multiply(SIZE_MAX, 2, SIZE_MAX), "2*SIZE_MAX is not ok"); } + + { + ok(can_multiply(SIZE_MAX, 10240, sizeof(MMDB_entry_data_list_s)), + "1024 entry_data_list_s's are okay"); + } +} + +#endif diff --git a/3rd/libmaxminddb/src/data-pool.h b/3rd/libmaxminddb/src/data-pool.h new file mode 100644 index 000000000..25d09923e --- /dev/null +++ b/3rd/libmaxminddb/src/data-pool.h @@ -0,0 +1,52 @@ +#ifndef DATA_POOL_H +#define DATA_POOL_H + +#include "maxminddb.h" + +#include +#include + +// This should be large enough that we never need to grow the array of pointers +// to blocks. 32 is enough. Even starting out of with size 1 (1 struct), the +// 32nd element alone will provide 2**32 structs as we exponentially increase +// the number in each block. Being confident that we do not have to grow the +// array lets us avoid writing code to do that. That code would be risky as it +// would rarely be hit and likely not be well tested. +#define DATA_POOL_NUM_BLOCKS 32 + +// A pool of memory for MMDB_entry_data_list_s structs. 
This is so we can +// allocate multiple up front rather than one at a time for performance +// reasons. +// +// The order you add elements to it (by calling data_pool_alloc()) ends up as +// the order of the list. +// +// The memory only grows. There is no support for releasing an element you take +// back to the pool. +typedef struct MMDB_data_pool_s { + // Index of the current block we're allocating out of. + size_t index; + + // The size of the current block, counting by structs. + size_t size; + + // How many used in the current block, counting by structs. + size_t used; + + // The current block we're allocating out of. + MMDB_entry_data_list_s *block; + + // The size of each block. + size_t sizes[DATA_POOL_NUM_BLOCKS]; + + // An array of pointers to blocks of memory holding space for list + // elements. + MMDB_entry_data_list_s *blocks[DATA_POOL_NUM_BLOCKS]; +} MMDB_data_pool_s; + +MMDB_data_pool_s *data_pool_new(size_t const); +void data_pool_destroy(MMDB_data_pool_s *const); +MMDB_entry_data_list_s *data_pool_alloc(MMDB_data_pool_s *const); +MMDB_entry_data_list_s *data_pool_to_list(MMDB_data_pool_s *const); + +#endif diff --git a/3rd/libmaxminddb/src/libmaxminddb.pc.in b/3rd/libmaxminddb/src/libmaxminddb.pc.in new file mode 100644 index 000000000..00ced3ba9 --- /dev/null +++ b/3rd/libmaxminddb/src/libmaxminddb.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: libmaxminddb +Description: C library for the MaxMind DB file format +URL: http://maxmind.github.io/libmaxminddb/ +Version: @PACKAGE_VERSION@ +Libs: -L${libdir} -lmaxminddb +Cflags: -I${includedir} diff --git a/3rd/libmaxminddb/src/maxminddb-compat-util.h b/3rd/libmaxminddb/src/maxminddb-compat-util.h new file mode 100644 index 000000000..5d7199439 --- /dev/null +++ b/3rd/libmaxminddb/src/maxminddb-compat-util.h @@ -0,0 +1,160 @@ +#include +#include + +/* *INDENT-OFF* */ + +/* The memmem, strdup, and strndup functions were all copied from 
the + * FreeBSD source, along with the relevant copyright notice. + * + * It'd be nicer to simply use the functions available on the system if they + * exist, but there doesn't seem to be a good way to detect them without also + * defining things like _GNU_SOURCE, which we want to avoid, because then we + * end up _accidentally_ using GNU features without noticing, which then + * breaks on systems like OSX. + * + * C is fun! */ + +/* Applies to memmem implementation */ +/*- + * Copyright (c) 2005 Pascal Gloor + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +static void * +mmdb_memmem(const void *l, size_t l_len, const void *s, size_t s_len) { + register char *cur, *last; + const char *cl = (const char *)l; + const char *cs = (const char *)s; + + /* we need something to compare */ + if (l_len == 0 || s_len == 0) + return NULL; + + /* "s" must be smaller or equal to "l" */ + if (l_len < s_len) + return NULL; + + /* special case where s_len == 1 */ + if (s_len == 1) + return memchr(l, (int)*cs, l_len); + + /* the last position where its possible to find "s" in "l" */ + last = (char *)cl + l_len - s_len; + + for (cur = (char *)cl; cur <= last; cur++) + if (cur[0] == cs[0] && memcmp(cur, cs, s_len) == 0) + return cur; + + return NULL; +} + +/* Applies to strnlen implementation */ +/*- + * Copyright (c) 2009 David Schultz + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +static size_t mmdb_strnlen(const char *s, size_t maxlen) { + size_t len; + + for (len = 0; len < maxlen; len++, s++) { + if (!*s) + break; + } + return (len); +} + +/* Applies to strdup and strndup implementation */ +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +static char *mmdb_strdup(const char *str) { + size_t len; + char *copy; + + len = strlen(str) + 1; + if ((copy = malloc(len)) == NULL) + return (NULL); + memcpy(copy, str, len); + return (copy); +} + +static char *mmdb_strndup(const char *str, size_t n) { + size_t len; + char *copy; + + len = mmdb_strnlen(str, n); + if ((copy = malloc(len + 1)) == NULL) + return (NULL); + memcpy(copy, str, len); + copy[len] = '\0'; + return (copy); +} +/* *INDENT-ON* */ diff --git a/3rd/libmaxminddb/src/maxminddb.c b/3rd/libmaxminddb/src/maxminddb.c new file mode 100644 index 000000000..5e97426cf --- /dev/null +++ b/3rd/libmaxminddb/src/maxminddb.c @@ -0,0 +1,2092 @@ +#if HAVE_CONFIG_H +#include +#endif +#include "data-pool.h" +#include "maxminddb-compat-util.h" +#include "maxminddb.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#ifndef UNICODE +#define UNICODE +#endif +#include +#include +#else +#include +#include +#include +#endif + +#define MMDB_DATA_SECTION_SEPARATOR (16) +#define MAXIMUM_DATA_STRUCTURE_DEPTH (512) + +#ifdef MMDB_DEBUG +#define DEBUG_MSG(msg) fprintf(stderr, msg "\n") +#define DEBUG_MSGF(fmt, ...) 
fprintf(stderr, fmt "\n", __VA_ARGS__) +#define DEBUG_BINARY(fmt, byte) \ + do { \ + char *binary = byte_to_binary(byte); \ + if (NULL == binary) { \ + fprintf(stderr, "Calloc failed in DEBUG_BINARY\n"); \ + abort(); \ + } \ + fprintf(stderr, fmt "\n", binary); \ + free(binary); \ + } while (0) +#define DEBUG_NL fprintf(stderr, "\n") +#else +#define DEBUG_MSG(...) +#define DEBUG_MSGF(...) +#define DEBUG_BINARY(...) +#define DEBUG_NL +#endif + +#ifdef MMDB_DEBUG +char *byte_to_binary(uint8_t byte) { + char *bits = calloc(9, sizeof(char)); + if (NULL == bits) { + return bits; + } + + for (uint8_t i = 0; i < 8; i++) { + bits[i] = byte & (128 >> i) ? '1' : '0'; + } + bits[8] = '\0'; + + return bits; +} + +char *type_num_to_name(uint8_t num) { + switch (num) { + case 0: + return "extended"; + case 1: + return "pointer"; + case 2: + return "utf8_string"; + case 3: + return "double"; + case 4: + return "bytes"; + case 5: + return "uint16"; + case 6: + return "uint32"; + case 7: + return "map"; + case 8: + return "int32"; + case 9: + return "uint64"; + case 10: + return "uint128"; + case 11: + return "array"; + case 12: + return "container"; + case 13: + return "end_marker"; + case 14: + return "boolean"; + case 15: + return "float"; + default: + return "unknown type"; + } +} +#endif + +/* None of the values we check on the lhs are bigger than uint32_t, so on + * platforms where SIZE_MAX is a 64-bit integer, this would be a no-op, and it + * makes the compiler complain if we do the check anyway. */ +#if SIZE_MAX == UINT32_MAX +#define MAYBE_CHECK_SIZE_OVERFLOW(lhs, rhs, error) \ + if ((lhs) > (rhs)) { \ + return error; \ + } +#else +#define MAYBE_CHECK_SIZE_OVERFLOW(...) 
+#endif + +typedef struct record_info_s { + uint16_t record_length; + uint32_t (*left_record_getter)(const uint8_t *); + uint32_t (*right_record_getter)(const uint8_t *); + uint8_t right_record_offset; +} record_info_s; + +#define METADATA_MARKER "\xab\xcd\xefMaxMind.com" +/* This is 128kb */ +#define METADATA_BLOCK_MAX_SIZE 131072 + +// 64 leads us to allocating 4 KiB on a 64bit system. +#define MMDB_POOL_INIT_SIZE 64 + +static int map_file(MMDB_s *const mmdb); +static const uint8_t *find_metadata(const uint8_t *file_content, + ssize_t file_size, + uint32_t *metadata_size); +static int read_metadata(MMDB_s *mmdb); +static MMDB_s make_fake_metadata_db(const MMDB_s *const mmdb); +static int +value_for_key_as_uint16(MMDB_entry_s *start, char *key, uint16_t *value); +static int +value_for_key_as_uint32(MMDB_entry_s *start, char *key, uint32_t *value); +static int +value_for_key_as_uint64(MMDB_entry_s *start, char *key, uint64_t *value); +static int +value_for_key_as_string(MMDB_entry_s *start, char *key, char const **value); +static int populate_languages_metadata(MMDB_s *mmdb, + MMDB_s *metadata_db, + MMDB_entry_s *metadata_start); +static int populate_description_metadata(MMDB_s *mmdb, + MMDB_s *metadata_db, + MMDB_entry_s *metadata_start); +static int resolve_any_address(const char *ipstr, struct addrinfo **addresses); +static int find_address_in_search_tree(const MMDB_s *const mmdb, + uint8_t *address, + sa_family_t address_family, + MMDB_lookup_result_s *result); +static record_info_s record_info_for_database(const MMDB_s *const mmdb); +static int find_ipv4_start_node(MMDB_s *const mmdb); +static uint8_t record_type(const MMDB_s *const mmdb, uint64_t record); +static uint32_t get_left_28_bit_record(const uint8_t *record); +static uint32_t get_right_28_bit_record(const uint8_t *record); +static uint32_t data_section_offset_for_record(const MMDB_s *const mmdb, + uint64_t record); +static int path_length(va_list va_path); +static int lookup_path_in_array(const char 
*path_elem, + const MMDB_s *const mmdb, + MMDB_entry_data_s *entry_data); +static int lookup_path_in_map(const char *path_elem, + const MMDB_s *const mmdb, + MMDB_entry_data_s *entry_data); +static int skip_map_or_array(const MMDB_s *const mmdb, + MMDB_entry_data_s *entry_data); +static int decode_one_follow(const MMDB_s *const mmdb, + uint32_t offset, + MMDB_entry_data_s *entry_data); +static int decode_one(const MMDB_s *const mmdb, + uint32_t offset, + MMDB_entry_data_s *entry_data); +static int get_ext_type(int raw_ext_type); +static uint32_t +get_ptr_from(uint8_t ctrl, uint8_t const *const ptr, int ptr_size); +static int get_entry_data_list(const MMDB_s *const mmdb, + uint32_t offset, + MMDB_entry_data_list_s *const entry_data_list, + MMDB_data_pool_s *const pool, + int depth); +static float get_ieee754_float(const uint8_t *restrict p); +static double get_ieee754_double(const uint8_t *restrict p); +static uint32_t get_uint32(const uint8_t *p); +static uint32_t get_uint24(const uint8_t *p); +static uint32_t get_uint16(const uint8_t *p); +static uint64_t get_uintX(const uint8_t *p, int length); +static int32_t get_sintX(const uint8_t *p, int length); +static void free_mmdb_struct(MMDB_s *const mmdb); +static void free_languages_metadata(MMDB_s *mmdb); +static void free_descriptions_metadata(MMDB_s *mmdb); +static MMDB_entry_data_list_s * +dump_entry_data_list(FILE *stream, + MMDB_entry_data_list_s *entry_data_list, + int indent, + int *status); +static void print_indentation(FILE *stream, int i); +static char *bytes_to_hex(uint8_t *bytes, uint32_t size); + +#define CHECKED_DECODE_ONE(mmdb, offset, entry_data) \ + do { \ + int status = decode_one(mmdb, offset, entry_data); \ + if (MMDB_SUCCESS != status) { \ + DEBUG_MSGF("CHECKED_DECODE_ONE failed." 
\ + " status = %d (%s)", \ + status, \ + MMDB_strerror(status)); \ + return status; \ + } \ + } while (0) + +#define CHECKED_DECODE_ONE_FOLLOW(mmdb, offset, entry_data) \ + do { \ + int status = decode_one_follow(mmdb, offset, entry_data); \ + if (MMDB_SUCCESS != status) { \ + DEBUG_MSGF("CHECKED_DECODE_ONE_FOLLOW failed." \ + " status = %d (%s)", \ + status, \ + MMDB_strerror(status)); \ + return status; \ + } \ + } while (0) + +#define FREE_AND_SET_NULL(p) \ + { \ + free((void *)(p)); \ + (p) = NULL; \ + } + +int MMDB_open(const char *const filename, uint32_t flags, MMDB_s *const mmdb) { + int status = MMDB_SUCCESS; + + mmdb->file_content = NULL; + mmdb->data_section = NULL; + mmdb->metadata.database_type = NULL; + mmdb->metadata.languages.count = 0; + mmdb->metadata.languages.names = NULL; + mmdb->metadata.description.count = 0; + + mmdb->filename = mmdb_strdup(filename); + if (NULL == mmdb->filename) { + status = MMDB_OUT_OF_MEMORY_ERROR; + goto cleanup; + } + + if ((flags & MMDB_MODE_MASK) == 0) { + flags |= MMDB_MODE_MMAP; + } + mmdb->flags = flags; + + if (MMDB_SUCCESS != (status = map_file(mmdb))) { + goto cleanup; + } + +#ifdef _WIN32 + WSADATA wsa; + WSAStartup(MAKEWORD(2, 2), &wsa); +#endif + + uint32_t metadata_size = 0; + const uint8_t *metadata = + find_metadata(mmdb->file_content, mmdb->file_size, &metadata_size); + if (NULL == metadata) { + status = MMDB_INVALID_METADATA_ERROR; + goto cleanup; + } + + mmdb->metadata_section = metadata; + mmdb->metadata_section_size = metadata_size; + + status = read_metadata(mmdb); + if (MMDB_SUCCESS != status) { + goto cleanup; + } + + if (mmdb->metadata.binary_format_major_version != 2) { + status = MMDB_UNKNOWN_DATABASE_FORMAT_ERROR; + goto cleanup; + } + + uint32_t search_tree_size = + mmdb->metadata.node_count * mmdb->full_record_byte_size; + + mmdb->data_section = + mmdb->file_content + search_tree_size + MMDB_DATA_SECTION_SEPARATOR; + if (search_tree_size + MMDB_DATA_SECTION_SEPARATOR > + 
(uint32_t)mmdb->file_size) { + status = MMDB_INVALID_METADATA_ERROR; + goto cleanup; + } + mmdb->data_section_size = (uint32_t)mmdb->file_size - search_tree_size - + MMDB_DATA_SECTION_SEPARATOR; + + // Although it is likely not possible to construct a database with valid + // valid metadata, as parsed above, and a data_section_size less than 3, + // we do this check as later we assume it is at least three when doing + // bound checks. + if (mmdb->data_section_size < 3) { + status = MMDB_INVALID_DATA_ERROR; + goto cleanup; + } + + mmdb->metadata_section = metadata; + mmdb->ipv4_start_node.node_value = 0; + mmdb->ipv4_start_node.netmask = 0; + + // We do this immediately as otherwise there is a race to set + // ipv4_start_node.node_value and ipv4_start_node.netmask. + if (mmdb->metadata.ip_version == 6) { + status = find_ipv4_start_node(mmdb); + if (status != MMDB_SUCCESS) { + goto cleanup; + } + } + +cleanup: + if (MMDB_SUCCESS != status) { + int saved_errno = errno; + free_mmdb_struct(mmdb); + errno = saved_errno; + } + return status; +} + +#ifdef _WIN32 + +static LPWSTR utf8_to_utf16(const char *utf8_str) { + int wide_chars = MultiByteToWideChar(CP_UTF8, 0, utf8_str, -1, NULL, 0); + wchar_t *utf16_str = (wchar_t *)calloc(wide_chars, sizeof(wchar_t)); + if (!utf16_str) { + return NULL; + } + + if (MultiByteToWideChar(CP_UTF8, 0, utf8_str, -1, utf16_str, wide_chars) < + 1) { + free(utf16_str); + return NULL; + } + + return utf16_str; +} + +static int map_file(MMDB_s *const mmdb) { + DWORD size; + int status = MMDB_SUCCESS; + HANDLE mmh = NULL; + HANDLE fd = INVALID_HANDLE_VALUE; + LPWSTR utf16_filename = utf8_to_utf16(mmdb->filename); + if (!utf16_filename) { + status = MMDB_FILE_OPEN_ERROR; + goto cleanup; + } + fd = CreateFileW(utf16_filename, + GENERIC_READ, + FILE_SHARE_READ, + NULL, + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, + NULL); + if (fd == INVALID_HANDLE_VALUE) { + status = MMDB_FILE_OPEN_ERROR; + goto cleanup; + } + size = GetFileSize(fd, NULL); + if 
(size == INVALID_FILE_SIZE) { + status = MMDB_FILE_OPEN_ERROR; + goto cleanup; + } + mmh = CreateFileMapping(fd, NULL, PAGE_READONLY, 0, size, NULL); + /* Microsoft documentation for CreateFileMapping indicates this returns + NULL not INVALID_HANDLE_VALUE on error */ + if (NULL == mmh) { + status = MMDB_IO_ERROR; + goto cleanup; + } + uint8_t *file_content = + (uint8_t *)MapViewOfFile(mmh, FILE_MAP_READ, 0, 0, 0); + if (file_content == NULL) { + status = MMDB_IO_ERROR; + goto cleanup; + } + + mmdb->file_size = size; + mmdb->file_content = file_content; + +cleanup:; + int saved_errno = errno; + if (INVALID_HANDLE_VALUE != fd) { + CloseHandle(fd); + } + if (NULL != mmh) { + CloseHandle(mmh); + } + errno = saved_errno; + free(utf16_filename); + + return status; +} + +#else // _WIN32 + +static int map_file(MMDB_s *const mmdb) { + ssize_t size; + int status = MMDB_SUCCESS; + + int flags = O_RDONLY; +#ifdef O_CLOEXEC + flags |= O_CLOEXEC; +#endif + int fd = open(mmdb->filename, flags); + struct stat s; + if (fd < 0 || fstat(fd, &s)) { + status = MMDB_FILE_OPEN_ERROR; + goto cleanup; + } + + size = s.st_size; + if (size < 0 || size != s.st_size) { + status = MMDB_OUT_OF_MEMORY_ERROR; + goto cleanup; + } + + uint8_t *file_content = + (uint8_t *)mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0); + if (MAP_FAILED == file_content) { + if (ENOMEM == errno) { + status = MMDB_OUT_OF_MEMORY_ERROR; + } else { + status = MMDB_IO_ERROR; + } + goto cleanup; + } + + mmdb->file_size = size; + mmdb->file_content = file_content; + +cleanup:; + int saved_errno = errno; + if (fd >= 0) { + close(fd); + } + errno = saved_errno; + + return status; +} + +#endif // _WIN32 + +static const uint8_t *find_metadata(const uint8_t *file_content, + ssize_t file_size, + uint32_t *metadata_size) { + const ssize_t marker_len = sizeof(METADATA_MARKER) - 1; + ssize_t max_size = file_size > METADATA_BLOCK_MAX_SIZE + ? 
METADATA_BLOCK_MAX_SIZE + : file_size; + + uint8_t *search_area = (uint8_t *)(file_content + (file_size - max_size)); + uint8_t *start = search_area; + uint8_t *tmp; + do { + tmp = mmdb_memmem(search_area, max_size, METADATA_MARKER, marker_len); + + if (NULL != tmp) { + max_size -= tmp - search_area; + search_area = tmp; + + /* Continue searching just after the marker we just read, in case + * there are multiple markers in the same file. This would be odd + * but is certainly not impossible. */ + max_size -= marker_len; + search_area += marker_len; + } + } while (NULL != tmp); + + if (search_area == start) { + return NULL; + } + + *metadata_size = (uint32_t)max_size; + + return search_area; +} + +static int read_metadata(MMDB_s *mmdb) { + /* We need to create a fake MMDB_s struct in order to decode values from + the metadata. The metadata is basically just like the data section, so we + want to use the same functions we use for the data section to get + metadata values. */ + MMDB_s metadata_db = make_fake_metadata_db(mmdb); + + MMDB_entry_s metadata_start = {.mmdb = &metadata_db, .offset = 0}; + + int status = value_for_key_as_uint32( + &metadata_start, "node_count", &mmdb->metadata.node_count); + if (MMDB_SUCCESS != status) { + return status; + } + if (!mmdb->metadata.node_count) { + DEBUG_MSG("could not find node_count value in metadata"); + return MMDB_INVALID_METADATA_ERROR; + } + + status = value_for_key_as_uint16( + &metadata_start, "record_size", &mmdb->metadata.record_size); + if (MMDB_SUCCESS != status) { + return status; + } + if (!mmdb->metadata.record_size) { + DEBUG_MSG("could not find record_size value in metadata"); + return MMDB_INVALID_METADATA_ERROR; + } + + if (mmdb->metadata.record_size != 24 && mmdb->metadata.record_size != 28 && + mmdb->metadata.record_size != 32) { + DEBUG_MSGF("bad record size in metadata: %i", + mmdb->metadata.record_size); + return MMDB_UNKNOWN_DATABASE_FORMAT_ERROR; + } + + status = value_for_key_as_uint16( + 
&metadata_start, "ip_version", &mmdb->metadata.ip_version);
    if (MMDB_SUCCESS != status) {
        return status;
    }
    if (!mmdb->metadata.ip_version) {
        DEBUG_MSG("could not find ip_version value in metadata");
        return MMDB_INVALID_METADATA_ERROR;
    }
    if (!(mmdb->metadata.ip_version == 4 || mmdb->metadata.ip_version == 6)) {
        DEBUG_MSGF("ip_version value in metadata is not 4 or 6 - it was %i",
                   mmdb->metadata.ip_version);
        return MMDB_INVALID_METADATA_ERROR;
    }

    status = value_for_key_as_string(
        &metadata_start, "database_type", &mmdb->metadata.database_type);
    if (MMDB_SUCCESS != status) {
        DEBUG_MSG("error finding database_type value in metadata");
        return status;
    }

    status = populate_languages_metadata(mmdb, &metadata_db, &metadata_start);
    if (MMDB_SUCCESS != status) {
        DEBUG_MSG("could not populate languages from metadata");
        return status;
    }

    status =
        value_for_key_as_uint16(&metadata_start,
                                "binary_format_major_version",
                                &mmdb->metadata.binary_format_major_version);
    if (MMDB_SUCCESS != status) {
        return status;
    }
    if (!mmdb->metadata.binary_format_major_version) {
        DEBUG_MSG(
            "could not find binary_format_major_version value in metadata");
        return MMDB_INVALID_METADATA_ERROR;
    }

    /* The minor version is allowed to be zero, so no value check here. */
    status =
        value_for_key_as_uint16(&metadata_start,
                                "binary_format_minor_version",
                                &mmdb->metadata.binary_format_minor_version);
    if (MMDB_SUCCESS != status) {
        return status;
    }

    status = value_for_key_as_uint64(
        &metadata_start, "build_epoch", &mmdb->metadata.build_epoch);
    if (MMDB_SUCCESS != status) {
        return status;
    }
    if (!mmdb->metadata.build_epoch) {
        DEBUG_MSG("could not find build_epoch value in metadata");
        return MMDB_INVALID_METADATA_ERROR;
    }

    status = populate_description_metadata(mmdb, &metadata_db, &metadata_start);
    if (MMDB_SUCCESS != status) {
        DEBUG_MSG("could not populate description from metadata");
        return status;
    }

    /* record_size is in bits per single record; a node holds two records,
     * so bytes per node = record_size * 2 / 8. */
    mmdb->full_record_byte_size = mmdb->metadata.record_size * 2 / 8U;

    /* Tree depth equals the address width of the database's IP version. */
    mmdb->depth = mmdb->metadata.ip_version == 4 ? 32 : 128;

    return MMDB_SUCCESS;
}

/* Build a throwaway MMDB_s whose "data section" is the metadata section of
 * the real database, so the regular decoding functions can be reused on
 * metadata. */
static MMDB_s make_fake_metadata_db(const MMDB_s *const mmdb) {
    MMDB_s fake_metadata_db = {.data_section = mmdb->metadata_section,
                               .data_section_size =
                                   mmdb->metadata_section_size};

    return fake_metadata_db;
}

/* Look up `key` under `start` and store it in *value; fails with
 * MMDB_INVALID_METADATA_ERROR unless the entry is exactly a uint16. */
static int
value_for_key_as_uint16(MMDB_entry_s *start, char *key, uint16_t *value) {
    MMDB_entry_data_s entry_data;
    const char *path[] = {key, NULL};
    int status = MMDB_aget_value(start, &entry_data, path);
    if (MMDB_SUCCESS != status) {
        return status;
    }
    if (MMDB_DATA_TYPE_UINT16 != entry_data.type) {
        DEBUG_MSGF("expect uint16 for %s but received %s",
                   key,
                   type_num_to_name(entry_data.type));
        return MMDB_INVALID_METADATA_ERROR;
    }
    *value = entry_data.uint16;
    return MMDB_SUCCESS;
}

/* Same as value_for_key_as_uint16 but for uint32 entries. */
static int
value_for_key_as_uint32(MMDB_entry_s *start, char *key, uint32_t *value) {
    MMDB_entry_data_s entry_data;
    const char *path[] = {key, NULL};
    int status = MMDB_aget_value(start, &entry_data, path);
    if (MMDB_SUCCESS != status) {
        return status;
    }
    if (MMDB_DATA_TYPE_UINT32 != entry_data.type) {
        DEBUG_MSGF("expect uint32 for %s but received %s",
                   key,
                   type_num_to_name(entry_data.type));
        return MMDB_INVALID_METADATA_ERROR;
    }
    *value = entry_data.uint32;
    return MMDB_SUCCESS;
}

/* Same as value_for_key_as_uint16 but for uint64 entries. */
static int
value_for_key_as_uint64(MMDB_entry_s *start, char *key, uint64_t *value) {
    MMDB_entry_data_s entry_data;
    const char *path[] = {key, NULL};
    int status = MMDB_aget_value(start, &entry_data, path);
    if (MMDB_SUCCESS != status) {
        return status;
    }
    if (MMDB_DATA_TYPE_UINT64 != entry_data.type) {
        DEBUG_MSGF("expect uint64 for %s but received %s",
                   key,
                   type_num_to_name(entry_data.type));
        return MMDB_INVALID_METADATA_ERROR;
    }
    *value = entry_data.uint64;
    return MMDB_SUCCESS;
}

/* Look up `key` and duplicate its UTF-8 string value into *value; the
 * caller owns the returned copy. */
static int
value_for_key_as_string(MMDB_entry_s *start, char *key, char const **value) {
    MMDB_entry_data_s entry_data;
    const char
*path[] = {key, NULL}; + int status = MMDB_aget_value(start, &entry_data, path); + if (MMDB_SUCCESS != status) { + return status; + } + if (MMDB_DATA_TYPE_UTF8_STRING != entry_data.type) { + DEBUG_MSGF("expect string for %s but received %s", + key, + type_num_to_name(entry_data.type)); + return MMDB_INVALID_METADATA_ERROR; + } + *value = mmdb_strndup((char *)entry_data.utf8_string, entry_data.data_size); + if (NULL == *value) { + return MMDB_OUT_OF_MEMORY_ERROR; + } + return MMDB_SUCCESS; +} + +static int populate_languages_metadata(MMDB_s *mmdb, + MMDB_s *metadata_db, + MMDB_entry_s *metadata_start) { + MMDB_entry_data_s entry_data; + + const char *path[] = {"languages", NULL}; + int status = MMDB_aget_value(metadata_start, &entry_data, path); + if (MMDB_SUCCESS != status) { + return status; + } + if (MMDB_DATA_TYPE_ARRAY != entry_data.type) { + return MMDB_INVALID_METADATA_ERROR; + } + + MMDB_entry_s array_start = {.mmdb = metadata_db, + .offset = entry_data.offset}; + + MMDB_entry_data_list_s *member; + status = MMDB_get_entry_data_list(&array_start, &member); + if (MMDB_SUCCESS != status) { + return status; + } + + MMDB_entry_data_list_s *first_member = member; + + uint32_t array_size = member->entry_data.data_size; + MAYBE_CHECK_SIZE_OVERFLOW( + array_size, SIZE_MAX / sizeof(char *), MMDB_INVALID_METADATA_ERROR); + + mmdb->metadata.languages.count = 0; + mmdb->metadata.languages.names = calloc(array_size, sizeof(char *)); + if (NULL == mmdb->metadata.languages.names) { + return MMDB_OUT_OF_MEMORY_ERROR; + } + + for (uint32_t i = 0; i < array_size; i++) { + member = member->next; + if (MMDB_DATA_TYPE_UTF8_STRING != member->entry_data.type) { + return MMDB_INVALID_METADATA_ERROR; + } + + mmdb->metadata.languages.names[i] = + mmdb_strndup((char *)member->entry_data.utf8_string, + member->entry_data.data_size); + + if (NULL == mmdb->metadata.languages.names[i]) { + return MMDB_OUT_OF_MEMORY_ERROR; + } + // We assign this as we go so that if we fail a calloc and 
need to + // free it, the count is right. + mmdb->metadata.languages.count = i + 1; + } + + MMDB_free_entry_data_list(first_member); + + return MMDB_SUCCESS; +} + +static int populate_description_metadata(MMDB_s *mmdb, + MMDB_s *metadata_db, + MMDB_entry_s *metadata_start) { + MMDB_entry_data_s entry_data; + + const char *path[] = {"description", NULL}; + int status = MMDB_aget_value(metadata_start, &entry_data, path); + if (MMDB_SUCCESS != status) { + return status; + } + + if (MMDB_DATA_TYPE_MAP != entry_data.type) { + DEBUG_MSGF("Unexpected entry_data type: %d", entry_data.type); + return MMDB_INVALID_METADATA_ERROR; + } + + MMDB_entry_s map_start = {.mmdb = metadata_db, .offset = entry_data.offset}; + + MMDB_entry_data_list_s *member; + status = MMDB_get_entry_data_list(&map_start, &member); + if (MMDB_SUCCESS != status) { + DEBUG_MSGF( + "MMDB_get_entry_data_list failed while populating description." + " status = %d (%s)", + status, + MMDB_strerror(status)); + return status; + } + + MMDB_entry_data_list_s *first_member = member; + + uint32_t map_size = member->entry_data.data_size; + mmdb->metadata.description.count = 0; + if (0 == map_size) { + mmdb->metadata.description.descriptions = NULL; + goto cleanup; + } + MAYBE_CHECK_SIZE_OVERFLOW(map_size, + SIZE_MAX / sizeof(MMDB_description_s *), + MMDB_INVALID_METADATA_ERROR); + + mmdb->metadata.description.descriptions = + calloc(map_size, sizeof(MMDB_description_s *)); + if (NULL == mmdb->metadata.description.descriptions) { + status = MMDB_OUT_OF_MEMORY_ERROR; + goto cleanup; + } + + for (uint32_t i = 0; i < map_size; i++) { + mmdb->metadata.description.descriptions[i] = + calloc(1, sizeof(MMDB_description_s)); + if (NULL == mmdb->metadata.description.descriptions[i]) { + status = MMDB_OUT_OF_MEMORY_ERROR; + goto cleanup; + } + + mmdb->metadata.description.count = i + 1; + mmdb->metadata.description.descriptions[i]->language = NULL; + mmdb->metadata.description.descriptions[i]->description = NULL; + + member = 
member->next;

        if (MMDB_DATA_TYPE_UTF8_STRING != member->entry_data.type) {
            status = MMDB_INVALID_METADATA_ERROR;
            goto cleanup;
        }

        mmdb->metadata.description.descriptions[i]->language =
            mmdb_strndup((char *)member->entry_data.utf8_string,
                         member->entry_data.data_size);

        if (NULL == mmdb->metadata.description.descriptions[i]->language) {
            status = MMDB_OUT_OF_MEMORY_ERROR;
            goto cleanup;
        }

        member = member->next;

        if (MMDB_DATA_TYPE_UTF8_STRING != member->entry_data.type) {
            status = MMDB_INVALID_METADATA_ERROR;
            goto cleanup;
        }

        mmdb->metadata.description.descriptions[i]->description =
            mmdb_strndup((char *)member->entry_data.utf8_string,
                         member->entry_data.data_size);

        if (NULL == mmdb->metadata.description.descriptions[i]->description) {
            status = MMDB_OUT_OF_MEMORY_ERROR;
            goto cleanup;
        }
    }

cleanup:
    /* The decoded list is always freed; strings duplicated above are owned
     * by mmdb->metadata. */
    MMDB_free_entry_data_list(first_member);

    return status;
}

/* Public API: resolve `ipstr` (numeric only, AI_NUMERICHOST) and look the
 * address up in the database. getaddrinfo failures go to *gai_error,
 * database errors to *mmdb_error. */
MMDB_lookup_result_s MMDB_lookup_string(const MMDB_s *const mmdb,
                                        const char *const ipstr,
                                        int *const gai_error,
                                        int *const mmdb_error) {
    MMDB_lookup_result_s result = {.found_entry = false,
                                   .netmask = 0,
                                   .entry = {.mmdb = mmdb, .offset = 0}};

    struct addrinfo *addresses = NULL;
    *gai_error = resolve_any_address(ipstr, &addresses);

    if (!*gai_error) {
        result = MMDB_lookup_sockaddr(mmdb, addresses->ai_addr, mmdb_error);
    }

    if (NULL != addresses) {
        freeaddrinfo(addresses);
    }

    return result;
}

/* Parse a numeric IP string into an addrinfo list. Returns 0 on success or
 * the getaddrinfo error code. */
static int resolve_any_address(const char *ipstr, struct addrinfo **addresses) {
    struct addrinfo hints = {
        .ai_family = AF_UNSPEC,
        .ai_flags = AI_NUMERICHOST,
        // We set ai_socktype so that we only get one result back
        .ai_socktype = SOCK_STREAM};

    int gai_status = getaddrinfo(ipstr, NULL, &hints, addresses);
    if (gai_status) {
        return gai_status;
    }

    return 0;
}

/* Public API: look up a sockaddr (AF_INET or AF_INET6) in the database. */
MMDB_lookup_result_s MMDB_lookup_sockaddr(const MMDB_s *const mmdb,
                                          const struct sockaddr *const sockaddr,
                                          int *const mmdb_error) {
MMDB_lookup_result_s result = {.found_entry = false,
                                   .netmask = 0,
                                   .entry = {.mmdb = mmdb, .offset = 0}};

    uint8_t mapped_address[16], *address;
    if (mmdb->metadata.ip_version == 4) {
        if (sockaddr->sa_family == AF_INET6) {
            *mmdb_error = MMDB_IPV6_LOOKUP_IN_IPV4_DATABASE_ERROR;
            return result;
        }
        address = (uint8_t *)&((struct sockaddr_in *)sockaddr)->sin_addr.s_addr;
    } else {
        if (sockaddr->sa_family == AF_INET6) {
            address = (uint8_t *)&((struct sockaddr_in6 *)sockaddr)
                          ->sin6_addr.s6_addr;
        } else {
            /* IPv4 lookup in an IPv6 tree: widen to 16 bytes with a
             * 12-byte zero prefix before the v4 address. */
            address = mapped_address;
            memset(address, 0, 12);
            memcpy(address + 12,
                   &((struct sockaddr_in *)sockaddr)->sin_addr.s_addr,
                   4);
        }
    }

    *mmdb_error = find_address_in_search_tree(
        mmdb, address, sockaddr->sa_family, &result);

    return result;
}

/* Walk the binary search tree, consuming one bit of `address` per level,
 * until hitting a terminal record; fills *result. */
static int find_address_in_search_tree(const MMDB_s *const mmdb,
                                       uint8_t *address,
                                       sa_family_t address_family,
                                       MMDB_lookup_result_s *result) {
    record_info_s record_info = record_info_for_database(mmdb);
    if (0 == record_info.right_record_offset) {
        return MMDB_UNKNOWN_DATABASE_FORMAT_ERROR;
    }

    uint32_t value = 0;
    uint16_t current_bit = 0;
    /* For v4 addresses in a v6 tree, skip the shared 96-bit zero prefix by
     * starting at the precomputed IPv4 start node. */
    if (mmdb->metadata.ip_version == 6 && address_family == AF_INET) {
        value = mmdb->ipv4_start_node.node_value;
        current_bit = mmdb->ipv4_start_node.netmask;
    }

    uint32_t node_count = mmdb->metadata.node_count;
    const uint8_t *search_tree = mmdb->file_content;
    const uint8_t *record_pointer;
    for (; current_bit < mmdb->depth && value < node_count; current_bit++) {
        /* Extract the current address bit, most significant first. */
        uint8_t bit =
            1U & (address[current_bit >> 3] >> (7 - (current_bit % 8)));

        record_pointer = &search_tree[value * record_info.record_length];
        /* Never read search-tree records past the start of the data
         * section. */
        if (record_pointer + record_info.record_length > mmdb->data_section) {
            return MMDB_CORRUPT_SEARCH_TREE_ERROR;
        }
        if (bit) {
            record_pointer += record_info.right_record_offset;
            value = record_info.right_record_getter(record_pointer);
        } else {
            value = record_info.left_record_getter(record_pointer);
        }
    }

    result->netmask = current_bit;

    /* A record value at or past node_count + data_section_size points off
     * the end of the database. */
    if (value >= node_count + mmdb->data_section_size) {
        // The pointer points off the end of the database.
        return MMDB_CORRUPT_SEARCH_TREE_ERROR;
    }

    if (value == node_count) {
        // record is empty
        result->found_entry = false;
        return MMDB_SUCCESS;
    }
    result->found_entry = true;
    result->entry.offset = data_section_offset_for_record(mmdb, value);

    return MMDB_SUCCESS;
}

/* Select the record accessors matching the database's record size. A
 * right_record_offset of 0 in the returned struct signals an unsupported
 * record length. */
static record_info_s record_info_for_database(const MMDB_s *const mmdb) {
    record_info_s record_info = {.record_length = mmdb->full_record_byte_size,
                                 .right_record_offset = 0};

    if (record_info.record_length == 6) {
        /* 24-bit records: two 3-byte values. */
        record_info.left_record_getter = &get_uint24;
        record_info.right_record_getter = &get_uint24;
        record_info.right_record_offset = 3;
    } else if (record_info.record_length == 7) {
        /* 28-bit records: the middle byte is split between both values. */
        record_info.left_record_getter = &get_left_28_bit_record;
        record_info.right_record_getter = &get_right_28_bit_record;
        record_info.right_record_offset = 3;
    } else if (record_info.record_length == 8) {
        /* 32-bit records: two 4-byte values. */
        record_info.left_record_getter = &get_uint32;
        record_info.right_record_getter = &get_uint32;
        record_info.right_record_offset = 4;
    } else {
        /* read_metadata already rejected other sizes. */
        assert(false);
    }

    return record_info;
}

/* Precompute the node reached after following 96 zero bits from the root,
 * i.e. where IPv4 addresses start inside an IPv6 search tree. */
static int find_ipv4_start_node(MMDB_s *const mmdb) {
    /* In a pathological case of a database with a single node search tree,
     * this check will be true even after we've found the IPv4 start node, but
     * that doesn't seem worth trying to fix.
*/
    if (mmdb->ipv4_start_node.node_value != 0) {
        return MMDB_SUCCESS;
    }

    record_info_s record_info = record_info_for_database(mmdb);

    const uint8_t *search_tree = mmdb->file_content;
    uint32_t node_value = 0;
    const uint8_t *record_pointer;
    uint16_t netmask;
    uint32_t node_count = mmdb->metadata.node_count;

    /* Follow the left (zero-bit) branch for the 96-bit IPv6 prefix that
     * precedes an embedded IPv4 address. */
    for (netmask = 0; netmask < 96 && node_value < node_count; netmask++) {
        record_pointer = &search_tree[node_value * record_info.record_length];
        if (record_pointer + record_info.record_length > mmdb->data_section) {
            return MMDB_CORRUPT_SEARCH_TREE_ERROR;
        }
        node_value = record_info.left_record_getter(record_pointer);
    }

    mmdb->ipv4_start_node.node_value = node_value;
    mmdb->ipv4_start_node.netmask = netmask;

    return MMDB_SUCCESS;
}

/* Classify a search-tree record value as a node pointer, empty marker,
 * data offset, or invalid. */
static uint8_t record_type(const MMDB_s *const mmdb, uint64_t record) {
    uint32_t node_count = mmdb->metadata.node_count;

    /* Ideally we'd check to make sure that a record never points to a
     * previously seen value, but that's more complicated. For now, we can
     * at least check that we don't end up at the top of the tree again.
*/
    if (record == 0) {
        DEBUG_MSG("record has a value of 0");
        return MMDB_RECORD_TYPE_INVALID;
    }

    if (record < node_count) {
        return MMDB_RECORD_TYPE_SEARCH_NODE;
    }

    /* A record equal to node_count is the "no data" sentinel. */
    if (record == node_count) {
        return MMDB_RECORD_TYPE_EMPTY;
    }

    if (record - node_count < mmdb->data_section_size) {
        return MMDB_RECORD_TYPE_DATA;
    }

    DEBUG_MSG("record has a value that points outside of the database");
    return MMDB_RECORD_TYPE_INVALID;
}

/* Left value of a 28-bit record pair: 24 bits from bytes 0-2 plus the high
 * nibble of the shared middle byte. */
static uint32_t get_left_28_bit_record(const uint8_t *record) {
    return record[0] * 65536 + record[1] * 256 + record[2] +
           ((record[3] & 0xf0) << 20);
}

/* Right value of a 28-bit record pair: the low 28 bits of the last four
 * bytes. */
static uint32_t get_right_28_bit_record(const uint8_t *record) {
    uint32_t value = get_uint32(record);
    return value & 0xfffffff;
}

/* Public API: read search-tree node `node_number` into *node, including
 * record types and (for data records) entry offsets. */
int MMDB_read_node(const MMDB_s *const mmdb,
                   uint32_t node_number,
                   MMDB_search_node_s *const node) {
    record_info_s record_info = record_info_for_database(mmdb);
    if (0 == record_info.right_record_offset) {
        return MMDB_UNKNOWN_DATABASE_FORMAT_ERROR;
    }

    if (node_number > mmdb->metadata.node_count) {
        return MMDB_INVALID_NODE_NUMBER_ERROR;
    }

    const uint8_t *search_tree = mmdb->file_content;
    const uint8_t *record_pointer =
        &search_tree[node_number * record_info.record_length];
    node->left_record = record_info.left_record_getter(record_pointer);
    record_pointer += record_info.right_record_offset;
    node->right_record = record_info.right_record_getter(record_pointer);

    node->left_record_type = record_type(mmdb, node->left_record);
    node->right_record_type = record_type(mmdb, node->right_record);

    // Note that offset will be invalid if the record type is not
    // MMDB_RECORD_TYPE_DATA, but that's ok. Any use of the record entry
    // for other data types is a programming error.

    node->left_record_entry = (struct MMDB_entry_s){
        .mmdb = mmdb,
        .offset = data_section_offset_for_record(mmdb, node->left_record),
    };
    node->right_record_entry = (struct MMDB_entry_s){
        .mmdb = mmdb,
        .offset = data_section_offset_for_record(mmdb, node->right_record),
    };

    return MMDB_SUCCESS;
}

/* Convert a raw data record value into an offset into the data section. */
static uint32_t data_section_offset_for_record(const MMDB_s *const mmdb,
                                               uint64_t record) {
    return (uint32_t)record - mmdb->metadata.node_count -
           MMDB_DATA_SECTION_SEPARATOR;
}

/* Public API: variadic front end over MMDB_aget_value; the path is a
 * NULL-terminated list of string arguments. */
int MMDB_get_value(MMDB_entry_s *const start,
                   MMDB_entry_data_s *const entry_data,
                   ...) {
    va_list path;
    va_start(path, entry_data);
    int status = MMDB_vget_value(start, entry_data, path);
    va_end(path);
    return status;
}

/* Public API: va_list front end — collects the path into a NULL-terminated
 * array and delegates to MMDB_aget_value. */
int MMDB_vget_value(MMDB_entry_s *const start,
                    MMDB_entry_data_s *const entry_data,
                    va_list va_path) {
    int length = path_length(va_path);
    const char *path_elem;
    int i = 0;

    MAYBE_CHECK_SIZE_OVERFLOW(length,
                              SIZE_MAX / sizeof(const char *) - 1,
                              MMDB_INVALID_METADATA_ERROR);

    /* +1 leaves room for the terminating NULL. */
    const char **path = calloc(length + 1, sizeof(const char *));
    if (NULL == path) {
        return MMDB_OUT_OF_MEMORY_ERROR;
    }

    while (NULL != (path_elem = va_arg(va_path, char *))) {
        path[i] = path_elem;
        i++;
    }
    path[i] = NULL;

    int status = MMDB_aget_value(start, entry_data, path);

    free((char **)path);

    return status;
}

/* Count the path elements in a va_list without consuming the caller's
 * copy. */
static int path_length(va_list va_path) {
    int i = 0;
    const char *ignore;
    va_list path_copy;
    va_copy(path_copy, va_path);

    while (NULL != (ignore = va_arg(path_copy, char *))) {
        i++;
    }

    va_end(path_copy);

    return i;
}

/* Public API: follow a NULL-terminated array of path elements (map keys
 * and array indexes) from `start` to the addressed value. */
int MMDB_aget_value(MMDB_entry_s *const start,
                    MMDB_entry_data_s *const entry_data,
                    const char *const *const path) {
    const MMDB_s *const mmdb = start->mmdb;
    uint32_t offset = start->offset;

    memset(entry_data, 0, sizeof(MMDB_entry_data_s));
    DEBUG_NL;
    DEBUG_MSG("looking up value by path");

    CHECKED_DECODE_ONE_FOLLOW(mmdb, offset, entry_data);

    DEBUG_NL;
DEBUG_MSGF("top level element is a %s", type_num_to_name(entry_data->type));

    /* Can this happen? It'd probably represent a pathological case under
     * normal use, but there's nothing preventing someone from passing an
     * invalid MMDB_entry_s struct to this function */
    if (!entry_data->has_data) {
        return MMDB_INVALID_LOOKUP_PATH_ERROR;
    }

    const char *path_elem;
    int i = 0;
    while (NULL != (path_elem = path[i++])) {
        DEBUG_NL;
        DEBUG_MSGF("path elem = %s", path_elem);

        /* XXX - it'd be good to find a quicker way to skip through these
           entries that doesn't involve decoding them
           completely. Basically we need to just use the size from the
           control byte to advance our pointer rather than calling
           decode_one(). */
        if (entry_data->type == MMDB_DATA_TYPE_ARRAY) {
            int status = lookup_path_in_array(path_elem, mmdb, entry_data);
            if (MMDB_SUCCESS != status) {
                memset(entry_data, 0, sizeof(MMDB_entry_data_s));
                return status;
            }
        } else if (entry_data->type == MMDB_DATA_TYPE_MAP) {
            int status = lookup_path_in_map(path_elem, mmdb, entry_data);
            if (MMDB_SUCCESS != status) {
                memset(entry_data, 0, sizeof(MMDB_entry_data_s));
                return status;
            }
        } else {
            /* A remaining path element but a scalar value: the path does
             * not match the data. */
            /* Once we make the code traverse maps & arrays without calling
             * decode_one() we can get rid of this.
*/ + memset(entry_data, 0, sizeof(MMDB_entry_data_s)); + return MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR; + } + } + + return MMDB_SUCCESS; +} + +static int lookup_path_in_array(const char *path_elem, + const MMDB_s *const mmdb, + MMDB_entry_data_s *entry_data) { + uint32_t size = entry_data->data_size; + char *first_invalid; + + int saved_errno = errno; + errno = 0; + int array_index = strtol(path_elem, &first_invalid, 10); + if (ERANGE == errno) { + errno = saved_errno; + return MMDB_INVALID_LOOKUP_PATH_ERROR; + } + errno = saved_errno; + + if (array_index < 0) { + array_index += size; + + if (array_index < 0) { + return MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR; + } + } + + if (*first_invalid || (uint32_t)array_index >= size) { + return MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR; + } + + for (int i = 0; i < array_index; i++) { + /* We don't want to follow a pointer here. If the next element is a + * pointer we simply skip it and keep going */ + CHECKED_DECODE_ONE(mmdb, entry_data->offset_to_next, entry_data); + int status = skip_map_or_array(mmdb, entry_data); + if (MMDB_SUCCESS != status) { + return status; + } + } + + MMDB_entry_data_s value; + CHECKED_DECODE_ONE_FOLLOW(mmdb, entry_data->offset_to_next, &value); + memcpy(entry_data, &value, sizeof(MMDB_entry_data_s)); + + return MMDB_SUCCESS; +} + +static int lookup_path_in_map(const char *path_elem, + const MMDB_s *const mmdb, + MMDB_entry_data_s *entry_data) { + uint32_t size = entry_data->data_size; + uint32_t offset = entry_data->offset_to_next; + size_t path_elem_len = strlen(path_elem); + + while (size-- > 0) { + MMDB_entry_data_s key, value; + CHECKED_DECODE_ONE_FOLLOW(mmdb, offset, &key); + + uint32_t offset_to_value = key.offset_to_next; + + if (MMDB_DATA_TYPE_UTF8_STRING != key.type) { + return MMDB_INVALID_DATA_ERROR; + } + + if (key.data_size == path_elem_len && + !memcmp(path_elem, key.utf8_string, path_elem_len)) { + + DEBUG_MSG("found key matching path elem"); + + 
CHECKED_DECODE_ONE_FOLLOW(mmdb, offset_to_value, &value);
            memcpy(entry_data, &value, sizeof(MMDB_entry_data_s));
            return MMDB_SUCCESS;
        } else {
            /* We don't want to follow a pointer here. If the next element is
             * a pointer we simply skip it and keep going */
            CHECKED_DECODE_ONE(mmdb, offset_to_value, &value);
            int status = skip_map_or_array(mmdb, &value);
            if (MMDB_SUCCESS != status) {
                return status;
            }
            offset = value.offset_to_next;
        }
    }

    /* Exhausted the map without a key match. */
    memset(entry_data, 0, sizeof(MMDB_entry_data_s));
    return MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR;
}

/* Advance entry_data past an entire map or array value (recursively); a
 * scalar entry is left untouched. */
static int skip_map_or_array(const MMDB_s *const mmdb,
                             MMDB_entry_data_s *entry_data) {
    if (entry_data->type == MMDB_DATA_TYPE_MAP) {
        uint32_t size = entry_data->data_size;
        while (size-- > 0) {
            CHECKED_DECODE_ONE(
                mmdb, entry_data->offset_to_next, entry_data); // key
            CHECKED_DECODE_ONE(
                mmdb, entry_data->offset_to_next, entry_data); // value
            int status = skip_map_or_array(mmdb, entry_data);
            if (MMDB_SUCCESS != status) {
                return status;
            }
        }
    } else if (entry_data->type == MMDB_DATA_TYPE_ARRAY) {
        uint32_t size = entry_data->data_size;
        while (size-- > 0) {
            CHECKED_DECODE_ONE(
                mmdb, entry_data->offset_to_next, entry_data); // value
            int status = skip_map_or_array(mmdb, entry_data);
            if (MMDB_SUCCESS != status) {
                return status;
            }
        }
    }

    return MMDB_SUCCESS;
}

/* Decode one entry and, if it is a pointer, decode the pointed-to entry
 * instead. Pointer-to-pointer is rejected per the spec. */
static int decode_one_follow(const MMDB_s *const mmdb,
                             uint32_t offset,
                             MMDB_entry_data_s *entry_data) {
    CHECKED_DECODE_ONE(mmdb, offset, entry_data);
    if (entry_data->type == MMDB_DATA_TYPE_POINTER) {
        uint32_t next = entry_data->offset_to_next;
        CHECKED_DECODE_ONE(mmdb, entry_data->pointer, entry_data);
        /* Pointers to pointers are illegal under the spec */
        if (entry_data->type == MMDB_DATA_TYPE_POINTER) {
            DEBUG_MSG("pointer points to another pointer");
            return MMDB_INVALID_DATA_ERROR;
        }

        /* The pointer could point to any part of the data section but the
         * next entry for this
particular offset may be the one after the
         * pointer, not the one after whatever the pointer points to. This
         * depends on whether the pointer points to something that is a simple
         * value or a compound value. For a compound value, the next one is
         * the one after the pointer result, not the one after the pointer. */
        if (entry_data->type != MMDB_DATA_TYPE_MAP &&
            entry_data->type != MMDB_DATA_TYPE_ARRAY) {

            entry_data->offset_to_next = next;
        }
    }

    return MMDB_SUCCESS;
}

#if !MMDB_UINT128_IS_BYTE_ARRAY
/* Assemble up to 16 big-endian bytes into a native 128-bit integer. */
static mmdb_uint128_t get_uint128(const uint8_t *p, int length) {
    mmdb_uint128_t value = 0;
    while (length-- > 0) {
        value <<= 8;
        value += *p++;
    }
    return value;
}
#endif

/* Decode a single data-section entry at `offset` into *entry_data: reads
 * the control byte, resolves the type and size, bounds-checks every read
 * against data_section_size, and fills the typed value plus
 * offset_to_next. */
static int decode_one(const MMDB_s *const mmdb,
                      uint32_t offset,
                      MMDB_entry_data_s *entry_data) {
    const uint8_t *mem = mmdb->data_section;

    // We subtract rather than add as it possible that offset + 1
    // could overflow for a corrupt database while an underflow
    // from data_section_size - 1 should not be possible.

    if (offset > mmdb->data_section_size - 1) {
        DEBUG_MSGF("Offset (%d) past data section (%d)",
                   offset,
                   mmdb->data_section_size);
        return MMDB_INVALID_DATA_ERROR;
    }

    entry_data->offset = offset;
    entry_data->has_data = true;

    DEBUG_NL;
    DEBUG_MSGF("Offset: %i", offset);

    uint8_t ctrl = mem[offset++];
    DEBUG_BINARY("Control byte: %s", ctrl);

    /* Top three control bits encode the type (0 == extended). */
    int type = (ctrl >> 5) & 7;
    DEBUG_MSGF("Type: %i (%s)", type, type_num_to_name(type));

    if (type == MMDB_DATA_TYPE_EXTENDED) {
        // Subtracting 1 to avoid possible overflow on offset + 1
        if (offset > mmdb->data_section_size - 1) {
            DEBUG_MSGF("Extended type offset (%d) past data section (%d)",
                       offset,
                       mmdb->data_section_size);
            return MMDB_INVALID_DATA_ERROR;
        }
        type = get_ext_type(mem[offset++]);
        DEBUG_MSGF("Extended type: %i (%s)", type, type_num_to_name(type));
    }

    entry_data->type = type;

    if (type == MMDB_DATA_TYPE_POINTER) {
        /* Bits 3-4 of the control byte give the pointer size minus one. */
        uint8_t psize = ((ctrl >> 3) & 3) + 1;
        DEBUG_MSGF("Pointer size: %i", psize);

        // We check that the offset does not extend past the end of the
        // database and that the subtraction of psize did not underflow.

        if (offset > mmdb->data_section_size - psize ||
            mmdb->data_section_size < psize) {
            DEBUG_MSGF("Pointer offset (%d) past data section (%d)",
                       offset + psize,
                       mmdb->data_section_size);
            return MMDB_INVALID_DATA_ERROR;
        }
        entry_data->pointer = get_ptr_from(ctrl, &mem[offset], psize);
        DEBUG_MSGF("Pointer to: %i", entry_data->pointer);

        entry_data->data_size = psize;
        entry_data->offset_to_next = offset + psize;
        return MMDB_SUCCESS;
    }

    /* Low five control bits encode the payload size; 29-31 mean the size
     * continues in the following 1-3 bytes. */
    uint32_t size = ctrl & 31;
    switch (size) {
        case 29:
            // We subtract when checking offset to avoid possible overflow
            if (offset > mmdb->data_section_size - 1) {
                DEBUG_MSGF("String end (%d, case 29) past data section (%d)",
                           offset,
                           mmdb->data_section_size);
                return MMDB_INVALID_DATA_ERROR;
            }
            size = 29 + mem[offset++];
            break;
        case 30:
            // We subtract when checking offset to avoid possible overflow
            if (offset > mmdb->data_section_size - 2) {
                DEBUG_MSGF("String end (%d, case 30) past data section (%d)",
                           offset,
                           mmdb->data_section_size);
                return MMDB_INVALID_DATA_ERROR;
            }
            size = 285 + get_uint16(&mem[offset]);
            offset += 2;
            break;
        case 31:
            // We subtract when checking offset to avoid possible overflow
            if (offset > mmdb->data_section_size - 3) {
                DEBUG_MSGF("String end (%d, case 31) past data section (%d)",
                           offset,
                           mmdb->data_section_size);
                return MMDB_INVALID_DATA_ERROR;
            }
            size = 65821 + get_uint24(&mem[offset]);
            offset += 3;
            /* NOTE(review): case 31 falls through to default, which only
             * breaks — harmless, but an explicit break would be clearer. */
        default:
            break;
    }

    DEBUG_MSGF("Size: %i", size);

    /* Maps and arrays carry their member count as the size; members are
     * decoded by the caller. */
    if (type == MMDB_DATA_TYPE_MAP || type == MMDB_DATA_TYPE_ARRAY) {
        entry_data->data_size = size;
        entry_data->offset_to_next = offset;
        return MMDB_SUCCESS;
    }

    /* Booleans store their value in the size field; they occupy no payload
     * bytes. */
    if (type == MMDB_DATA_TYPE_BOOLEAN) {
        entry_data->boolean = size ? true : false;
        entry_data->data_size = 0;
        entry_data->offset_to_next = offset;
        DEBUG_MSGF("boolean value: %s", entry_data->boolean ?
"true" : "false");
        return MMDB_SUCCESS;
    }

    // Check that the data doesn't extend past the end of the memory
    // buffer and that the calculation in doing this did not underflow.
    if (offset > mmdb->data_section_size - size ||
        mmdb->data_section_size < size) {
        DEBUG_MSGF("Data end (%d) past data section (%d)",
                   offset + size,
                   mmdb->data_section_size);
        return MMDB_INVALID_DATA_ERROR;
    }

    /* Integer types are stored big-endian with leading zero bytes
     * stripped, so any size up to the type's width is valid. */
    if (type == MMDB_DATA_TYPE_UINT16) {
        if (size > 2) {
            DEBUG_MSGF("uint16 of size %d", size);
            return MMDB_INVALID_DATA_ERROR;
        }
        entry_data->uint16 = (uint16_t)get_uintX(&mem[offset], size);
        DEBUG_MSGF("uint16 value: %u", entry_data->uint16);
    } else if (type == MMDB_DATA_TYPE_UINT32) {
        if (size > 4) {
            DEBUG_MSGF("uint32 of size %d", size);
            return MMDB_INVALID_DATA_ERROR;
        }
        entry_data->uint32 = (uint32_t)get_uintX(&mem[offset], size);
        DEBUG_MSGF("uint32 value: %u", entry_data->uint32);
    } else if (type == MMDB_DATA_TYPE_INT32) {
        if (size > 4) {
            DEBUG_MSGF("int32 of size %d", size);
            return MMDB_INVALID_DATA_ERROR;
        }
        entry_data->int32 = get_sintX(&mem[offset], size);
        DEBUG_MSGF("int32 value: %i", entry_data->int32);
    } else if (type == MMDB_DATA_TYPE_UINT64) {
        if (size > 8) {
            DEBUG_MSGF("uint64 of size %d", size);
            return MMDB_INVALID_DATA_ERROR;
        }
        entry_data->uint64 = get_uintX(&mem[offset], size);
        DEBUG_MSGF("uint64 value: %" PRIu64, entry_data->uint64);
    } else if (type == MMDB_DATA_TYPE_UINT128) {
        if (size > 16) {
            DEBUG_MSGF("uint128 of size %d", size);
            return MMDB_INVALID_DATA_ERROR;
        }
#if MMDB_UINT128_IS_BYTE_ARRAY
        /* Byte-array representation: right-align the stored bytes in a
         * zeroed 16-byte buffer. */
        memset(entry_data->uint128, 0, 16);
        if (size > 0) {
            memcpy(entry_data->uint128 + 16 - size, &mem[offset], size);
        }
#else
        entry_data->uint128 = get_uint128(&mem[offset], size);
#endif
    } else if (type == MMDB_DATA_TYPE_FLOAT) {
        /* Floats must be exactly 4 bytes per the spec. */
        if (size != 4) {
            DEBUG_MSGF("float of size %d", size);
            return MMDB_INVALID_DATA_ERROR;
        }
        size = 4;
        entry_data->float_value =
get_ieee754_float(&mem[offset]);
        DEBUG_MSGF("float value: %f", entry_data->float_value);
    } else if (type == MMDB_DATA_TYPE_DOUBLE) {
        /* Doubles must be exactly 8 bytes per the spec. */
        if (size != 8) {
            DEBUG_MSGF("double of size %d", size);
            return MMDB_INVALID_DATA_ERROR;
        }
        size = 8;
        entry_data->double_value = get_ieee754_double(&mem[offset]);
        DEBUG_MSGF("double value: %f", entry_data->double_value);
    } else if (type == MMDB_DATA_TYPE_UTF8_STRING) {
        /* Strings point directly into the mapped data section and are NOT
         * NUL-terminated; data_size gives the length. */
        entry_data->utf8_string = size == 0 ? "" : (char *)&mem[offset];
        entry_data->data_size = size;
#ifdef MMDB_DEBUG
        char *string =
            mmdb_strndup(entry_data->utf8_string, size > 50 ? 50 : size);
        if (NULL == string) {
            abort();
        }
        DEBUG_MSGF("string value: %s", string);
        free(string);
#endif
    } else if (type == MMDB_DATA_TYPE_BYTES) {
        entry_data->bytes = &mem[offset];
        entry_data->data_size = size;
    }

    entry_data->offset_to_next = offset + size;

    return MMDB_SUCCESS;
}

/* Extended types are numbered starting after the 7 basic types. */
static int get_ext_type(int raw_ext_type) { return 7 + raw_ext_type; }

/* Decode a pointer value: low control bits plus 1-4 following bytes, with
 * the cumulative bias (2048, 2048+524288) defined by the format spec. */
static uint32_t
get_ptr_from(uint8_t ctrl, uint8_t const *const ptr, int ptr_size) {
    uint32_t new_offset;
    switch (ptr_size) {
        case 1:
            new_offset = ((ctrl & 7) << 8) + ptr[0];
            break;
        case 2:
            new_offset = 2048 + ((ctrl & 7) << 16) + (ptr[0] << 8) + ptr[1];
            break;
        case 3:
            new_offset = 2048 + 524288 + ((ctrl & 7) << 24) + get_uint24(ptr);
            break;
        case 4:
        default:
            new_offset = get_uint32(ptr);
            break;
    }
    return new_offset;
}

/* Public API: decode the entire metadata section as an entry data list. */
int MMDB_get_metadata_as_entry_data_list(
    const MMDB_s *const mmdb, MMDB_entry_data_list_s **const entry_data_list) {
    MMDB_s metadata_db = make_fake_metadata_db(mmdb);

    MMDB_entry_s metadata_start = {.mmdb = &metadata_db, .offset = 0};

    return MMDB_get_entry_data_list(&metadata_start, entry_data_list);
}

/* Public API: decode the value at `start` (recursively for maps/arrays)
 * into a flat, pool-allocated entry data list. */
int MMDB_get_entry_data_list(MMDB_entry_s *start,
                             MMDB_entry_data_list_s **const entry_data_list) {
    MMDB_data_pool_s *const pool = data_pool_new(MMDB_POOL_INIT_SIZE);
    if (!pool) {
        return MMDB_OUT_OF_MEMORY_ERROR;
    }

    MMDB_entry_data_list_s *const list = data_pool_alloc(pool);
    if (!list) {
        data_pool_destroy(pool);
        return MMDB_OUT_OF_MEMORY_ERROR;
    }

    int const status =
        get_entry_data_list(start->mmdb, start->offset, list, pool, 0);

    /* The list is materialized from the pool even on failure so the caller
     * can free it uniformly. */
    *entry_data_list = data_pool_to_list(pool);
    if (!*entry_data_list) {
        data_pool_destroy(pool);
        return MMDB_OUT_OF_MEMORY_ERROR;
    }

    return status;
}

/* Recursive worker for MMDB_get_entry_data_list. `depth` caps recursion at
 * MAXIMUM_DATA_STRUCTURE_DEPTH to reject maliciously nested data. */
static int get_entry_data_list(const MMDB_s *const mmdb,
                               uint32_t offset,
                               MMDB_entry_data_list_s *const entry_data_list,
                               MMDB_data_pool_s *const pool,
                               int depth) {
    if (depth >= MAXIMUM_DATA_STRUCTURE_DEPTH) {
        DEBUG_MSG("reached the maximum data structure depth");
        return MMDB_INVALID_DATA_ERROR;
    }
    depth++;
    CHECKED_DECODE_ONE(mmdb, offset, &entry_data_list->entry_data);

    switch (entry_data_list->entry_data.type) {
        case MMDB_DATA_TYPE_POINTER: {
            uint32_t next_offset = entry_data_list->entry_data.offset_to_next;
            uint32_t last_offset;
            CHECKED_DECODE_ONE(mmdb,
                               last_offset =
                                   entry_data_list->entry_data.pointer,
                               &entry_data_list->entry_data);

            /* Pointers to pointers are illegal under the spec */
            if (entry_data_list->entry_data.type == MMDB_DATA_TYPE_POINTER) {
                DEBUG_MSG("pointer points to another pointer");
                return MMDB_INVALID_DATA_ERROR;
            }

            /* Compound targets must be expanded in place at the pointer's
             * location. */
            if (entry_data_list->entry_data.type == MMDB_DATA_TYPE_ARRAY ||
                entry_data_list->entry_data.type == MMDB_DATA_TYPE_MAP) {

                int status = get_entry_data_list(
                    mmdb, last_offset, entry_data_list, pool, depth);
                if (MMDB_SUCCESS != status) {
                    DEBUG_MSG("get_entry_data_list on pointer failed.");
                    return status;
                }
            }
            /* Iteration continues after the pointer itself, not after its
             * target. */
            entry_data_list->entry_data.offset_to_next = next_offset;
        } break;
        case MMDB_DATA_TYPE_ARRAY: {
            uint32_t array_size = entry_data_list->entry_data.data_size;
            uint32_t array_offset = entry_data_list->entry_data.offset_to_next;
            while (array_size-- > 0) {
                MMDB_entry_data_list_s *entry_data_list_to =
                    data_pool_alloc(pool);
                if (!entry_data_list_to) {
                    return
MMDB_OUT_OF_MEMORY_ERROR; + } + + int status = get_entry_data_list( + mmdb, array_offset, entry_data_list_to, pool, depth); + if (MMDB_SUCCESS != status) { + DEBUG_MSG("get_entry_data_list on array element failed."); + return status; + } + + array_offset = entry_data_list_to->entry_data.offset_to_next; + } + entry_data_list->entry_data.offset_to_next = array_offset; + + } break; + case MMDB_DATA_TYPE_MAP: { + uint32_t size = entry_data_list->entry_data.data_size; + + offset = entry_data_list->entry_data.offset_to_next; + while (size-- > 0) { + MMDB_entry_data_list_s *list_key = data_pool_alloc(pool); + if (!list_key) { + return MMDB_OUT_OF_MEMORY_ERROR; + } + + int status = + get_entry_data_list(mmdb, offset, list_key, pool, depth); + if (MMDB_SUCCESS != status) { + DEBUG_MSG("get_entry_data_list on map key failed."); + return status; + } + + offset = list_key->entry_data.offset_to_next; + + MMDB_entry_data_list_s *list_value = data_pool_alloc(pool); + if (!list_value) { + return MMDB_OUT_OF_MEMORY_ERROR; + } + + status = + get_entry_data_list(mmdb, offset, list_value, pool, depth); + if (MMDB_SUCCESS != status) { + DEBUG_MSG("get_entry_data_list on map element failed."); + return status; + } + offset = list_value->entry_data.offset_to_next; + } + entry_data_list->entry_data.offset_to_next = offset; + } break; + default: + break; + } + + return MMDB_SUCCESS; +} + +static float get_ieee754_float(const uint8_t *restrict p) { + volatile float f; + uint8_t *q = (void *)&f; +/* Windows builds don't use autoconf but we can assume they're all + * little-endian. 
*/ +#if MMDB_LITTLE_ENDIAN || _WIN32 + q[3] = p[0]; + q[2] = p[1]; + q[1] = p[2]; + q[0] = p[3]; +#else + memcpy(q, p, 4); +#endif + return f; +} + +static double get_ieee754_double(const uint8_t *restrict p) { + volatile double d; + uint8_t *q = (void *)&d; +#if MMDB_LITTLE_ENDIAN || _WIN32 + q[7] = p[0]; + q[6] = p[1]; + q[5] = p[2]; + q[4] = p[3]; + q[3] = p[4]; + q[2] = p[5]; + q[1] = p[6]; + q[0] = p[7]; +#else + memcpy(q, p, 8); +#endif + + return d; +} + +static uint32_t get_uint32(const uint8_t *p) { + return p[0] * 16777216U + p[1] * 65536 + p[2] * 256 + p[3]; +} + +static uint32_t get_uint24(const uint8_t *p) { + return p[0] * 65536U + p[1] * 256 + p[2]; +} + +static uint32_t get_uint16(const uint8_t *p) { return p[0] * 256U + p[1]; } + +static uint64_t get_uintX(const uint8_t *p, int length) { + uint64_t value = 0; + while (length-- > 0) { + value <<= 8; + value += *p++; + } + return value; +} + +static int32_t get_sintX(const uint8_t *p, int length) { + return (int32_t)get_uintX(p, length); +} + +void MMDB_free_entry_data_list(MMDB_entry_data_list_s *const entry_data_list) { + if (entry_data_list == NULL) { + return; + } + data_pool_destroy(entry_data_list->pool); +} + +void MMDB_close(MMDB_s *const mmdb) { free_mmdb_struct(mmdb); } + +static void free_mmdb_struct(MMDB_s *const mmdb) { + if (!mmdb) { + return; + } + + if (NULL != mmdb->filename) { + FREE_AND_SET_NULL(mmdb->filename); + } + if (NULL != mmdb->file_content) { +#ifdef _WIN32 + UnmapViewOfFile(mmdb->file_content); + /* Winsock is only initialized if open was successful so we only have + * to cleanup then. 
*/ + WSACleanup(); +#else + munmap((void *)mmdb->file_content, mmdb->file_size); +#endif + } + + if (NULL != mmdb->metadata.database_type) { + FREE_AND_SET_NULL(mmdb->metadata.database_type); + } + + free_languages_metadata(mmdb); + free_descriptions_metadata(mmdb); +} + +static void free_languages_metadata(MMDB_s *mmdb) { + if (!mmdb->metadata.languages.names) { + return; + } + + for (size_t i = 0; i < mmdb->metadata.languages.count; i++) { + FREE_AND_SET_NULL(mmdb->metadata.languages.names[i]); + } + FREE_AND_SET_NULL(mmdb->metadata.languages.names); +} + +static void free_descriptions_metadata(MMDB_s *mmdb) { + if (!mmdb->metadata.description.count) { + return; + } + + for (size_t i = 0; i < mmdb->metadata.description.count; i++) { + if (NULL != mmdb->metadata.description.descriptions[i]) { + if (NULL != mmdb->metadata.description.descriptions[i]->language) { + FREE_AND_SET_NULL( + mmdb->metadata.description.descriptions[i]->language); + } + + if (NULL != + mmdb->metadata.description.descriptions[i]->description) { + FREE_AND_SET_NULL( + mmdb->metadata.description.descriptions[i]->description); + } + FREE_AND_SET_NULL(mmdb->metadata.description.descriptions[i]); + } + } + + FREE_AND_SET_NULL(mmdb->metadata.description.descriptions); +} + +const char *MMDB_lib_version(void) { return PACKAGE_VERSION; } + +int MMDB_dump_entry_data_list(FILE *const stream, + MMDB_entry_data_list_s *const entry_data_list, + int indent) { + int status; + dump_entry_data_list(stream, entry_data_list, indent, &status); + return status; +} + +static MMDB_entry_data_list_s * +dump_entry_data_list(FILE *stream, + MMDB_entry_data_list_s *entry_data_list, + int indent, + int *status) { + switch (entry_data_list->entry_data.type) { + case MMDB_DATA_TYPE_MAP: { + uint32_t size = entry_data_list->entry_data.data_size; + + print_indentation(stream, indent); + fprintf(stream, "{\n"); + indent += 2; + + for (entry_data_list = entry_data_list->next; + size && entry_data_list; + size--) { + + if 
(MMDB_DATA_TYPE_UTF8_STRING != + entry_data_list->entry_data.type) { + *status = MMDB_INVALID_DATA_ERROR; + return NULL; + } + char *key = mmdb_strndup( + (char *)entry_data_list->entry_data.utf8_string, + entry_data_list->entry_data.data_size); + if (NULL == key) { + *status = MMDB_OUT_OF_MEMORY_ERROR; + return NULL; + } + + print_indentation(stream, indent); + fprintf(stream, "\"%s\": \n", key); + free(key); + + entry_data_list = entry_data_list->next; + entry_data_list = dump_entry_data_list( + stream, entry_data_list, indent + 2, status); + + if (MMDB_SUCCESS != *status) { + return NULL; + } + } + + indent -= 2; + print_indentation(stream, indent); + fprintf(stream, "}\n"); + } break; + case MMDB_DATA_TYPE_ARRAY: { + uint32_t size = entry_data_list->entry_data.data_size; + + print_indentation(stream, indent); + fprintf(stream, "[\n"); + indent += 2; + + for (entry_data_list = entry_data_list->next; + size && entry_data_list; + size--) { + entry_data_list = dump_entry_data_list( + stream, entry_data_list, indent, status); + if (MMDB_SUCCESS != *status) { + return NULL; + } + } + + indent -= 2; + print_indentation(stream, indent); + fprintf(stream, "]\n"); + } break; + case MMDB_DATA_TYPE_UTF8_STRING: { + char *string = + mmdb_strndup((char *)entry_data_list->entry_data.utf8_string, + entry_data_list->entry_data.data_size); + if (NULL == string) { + *status = MMDB_OUT_OF_MEMORY_ERROR; + return NULL; + } + print_indentation(stream, indent); + fprintf(stream, "\"%s\" \n", string); + free(string); + entry_data_list = entry_data_list->next; + } break; + case MMDB_DATA_TYPE_BYTES: { + char *hex_string = + bytes_to_hex((uint8_t *)entry_data_list->entry_data.bytes, + entry_data_list->entry_data.data_size); + + if (NULL == hex_string) { + *status = MMDB_OUT_OF_MEMORY_ERROR; + return NULL; + } + + print_indentation(stream, indent); + fprintf(stream, "%s \n", hex_string); + free(hex_string); + + entry_data_list = entry_data_list->next; + } break; + case 
MMDB_DATA_TYPE_DOUBLE: + print_indentation(stream, indent); + fprintf(stream, + "%f \n", + entry_data_list->entry_data.double_value); + entry_data_list = entry_data_list->next; + break; + case MMDB_DATA_TYPE_FLOAT: + print_indentation(stream, indent); + fprintf(stream, + "%f \n", + entry_data_list->entry_data.float_value); + entry_data_list = entry_data_list->next; + break; + case MMDB_DATA_TYPE_UINT16: + print_indentation(stream, indent); + fprintf( + stream, "%u \n", entry_data_list->entry_data.uint16); + entry_data_list = entry_data_list->next; + break; + case MMDB_DATA_TYPE_UINT32: + print_indentation(stream, indent); + fprintf( + stream, "%u \n", entry_data_list->entry_data.uint32); + entry_data_list = entry_data_list->next; + break; + case MMDB_DATA_TYPE_BOOLEAN: + print_indentation(stream, indent); + fprintf(stream, + "%s \n", + entry_data_list->entry_data.boolean ? "true" : "false"); + entry_data_list = entry_data_list->next; + break; + case MMDB_DATA_TYPE_UINT64: + print_indentation(stream, indent); + fprintf(stream, + "%" PRIu64 " \n", + entry_data_list->entry_data.uint64); + entry_data_list = entry_data_list->next; + break; + case MMDB_DATA_TYPE_UINT128: + print_indentation(stream, indent); +#if MMDB_UINT128_IS_BYTE_ARRAY + char *hex_string = bytes_to_hex( + (uint8_t *)entry_data_list->entry_data.uint128, 16); + if (NULL == hex_string) { + *status = MMDB_OUT_OF_MEMORY_ERROR; + return NULL; + } + fprintf(stream, "0x%s \n", hex_string); + free(hex_string); +#else + uint64_t high = entry_data_list->entry_data.uint128 >> 64; + uint64_t low = (uint64_t)entry_data_list->entry_data.uint128; + fprintf(stream, + "0x%016" PRIX64 "%016" PRIX64 " \n", + high, + low); +#endif + entry_data_list = entry_data_list->next; + break; + case MMDB_DATA_TYPE_INT32: + print_indentation(stream, indent); + fprintf(stream, "%d \n", entry_data_list->entry_data.int32); + entry_data_list = entry_data_list->next; + break; + default: + *status = MMDB_INVALID_DATA_ERROR; + return NULL; 
+ } + + *status = MMDB_SUCCESS; + return entry_data_list; +} + +static void print_indentation(FILE *stream, int i) { + char buffer[1024]; + int size = i >= 1024 ? 1023 : i; + memset(buffer, 32, size); + buffer[size] = '\0'; + fputs(buffer, stream); +} + +static char *bytes_to_hex(uint8_t *bytes, uint32_t size) { + char *hex_string; + MAYBE_CHECK_SIZE_OVERFLOW(size, SIZE_MAX / 2 - 1, NULL); + + hex_string = calloc((size * 2) + 1, sizeof(char)); + if (NULL == hex_string) { + return NULL; + } + + for (uint32_t i = 0; i < size; i++) { + sprintf(hex_string + (2 * i), "%02X", bytes[i]); + } + + return hex_string; +} + +const char *MMDB_strerror(int error_code) { + switch (error_code) { + case MMDB_SUCCESS: + return "Success (not an error)"; + case MMDB_FILE_OPEN_ERROR: + return "Error opening the specified MaxMind DB file"; + case MMDB_CORRUPT_SEARCH_TREE_ERROR: + return "The MaxMind DB file's search tree is corrupt"; + case MMDB_INVALID_METADATA_ERROR: + return "The MaxMind DB file contains invalid metadata"; + case MMDB_IO_ERROR: + return "An attempt to read data from the MaxMind DB file failed"; + case MMDB_OUT_OF_MEMORY_ERROR: + return "A memory allocation call failed"; + case MMDB_UNKNOWN_DATABASE_FORMAT_ERROR: + return "The MaxMind DB file is in a format this library can't " + "handle (unknown record size or binary format version)"; + case MMDB_INVALID_DATA_ERROR: + return "The MaxMind DB file's data section contains bad data " + "(unknown data type or corrupt data)"; + case MMDB_INVALID_LOOKUP_PATH_ERROR: + return "The lookup path contained an invalid value (like a " + "negative integer for an array index)"; + case MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR: + return "The lookup path does not match the data (key that doesn't " + "exist, array index bigger than the array, expected array " + "or map where none exists)"; + case MMDB_INVALID_NODE_NUMBER_ERROR: + return "The MMDB_read_node function was called with a node number " + "that does not exist in the search 
tree"; + case MMDB_IPV6_LOOKUP_IN_IPV4_DATABASE_ERROR: + return "You attempted to look up an IPv6 address in an IPv4-only " + "database"; + default: + return "Unknown error code"; + } +} diff --git a/CMakeLists.txt b/CMakeLists.txt index 260c0ed93..4263ada62 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,11 +2,22 @@ cmake_minimum_required(VERSION 3.13) list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake") -# this is the source of truth for version, which will be written to config.h include file. -project(vizer VERSION 3.1.0) -set(VIZER_PRERELEASE "") -set(VIZER_VERSION_NUM "${PROJECT_VERSION}${VIZER_PRERELEASE}") -set(VIZER_VERSION " pktvisor ${PROJECT_VERSION}${VIZER_PRERELEASE}") +# VERSION +# this is the source of truth for semver version +project(visor VERSION 3.2.0) + +# for main line release, this is empty +# for development release, this is "-develop" +# for release candidate, this is "-rc" +set(VISOR_PRERELEASE "-rc") + +# these are computed +set(VISOR_VERSION_NUM "${PROJECT_VERSION}${VISOR_PRERELEASE}") +set(VISOR_VERSION "pktvisor ${PROJECT_VERSION}${VISOR_PRERELEASE}") + +# used in CI +file(WRITE ${CMAKE_BINARY_DIR}/VERSION ${VISOR_VERSION_NUM}) +###### set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) @@ -15,14 +26,27 @@ set(CMAKE_POSITION_INDEPENDENT_CODE ON) #set(CMAKE_VERBOSE_MAKEFILE ON) add_compile_options(-Wall -pedantic -W -Wextra -Wno-unknown-pragmas) +include(conan) + +conan_add_remote(NAME ns1labs INDEX 1 + URL https://ns1labs.jfrog.io/artifactory/api/conan/ns1labs-conan + VERIFY_SSL True) + +conan_cmake_autodetect(settings) + +conan_cmake_install(PATH_OR_REFERENCE ${CMAKE_SOURCE_DIR} + BUILD missing + GENERATOR cmake + SETTINGS ${settings} + INSTALL_FOLDER ${CMAKE_BINARY_DIR}) + include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) conan_basic_setup() find_package(Corrade REQUIRED PluginManager) -find_package(PkgConfig) include(sanitizer) -set(VIZER_STATIC_PLUGINS) +set(VISOR_STATIC_PLUGINS) enable_testing() 
add_subdirectory(3rd) diff --git a/README.md b/README.md index 70c1fb3c3..909f18202 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,6 @@ ![pktvisor](docs/images/pktvisor-header.png) -> This project is in [active development](https://github.com/ns1/community/blob/master/project_status/ACTIVE_DEVELOPMENT.md). - -![Build status](https://github.com/ns1/pktvisor/workflows/Build/badge.svg) +![Build status](https://github.com/ns1labs/pktvisor/workflows/Build/badge.svg) [![LGTM alerts](https://img.shields.io/lgtm/alerts/g/ns1/pktvisor.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/ns1/pktvisor/alerts/) [![Coverity alerts](https://img.shields.io/coverity/scan/22731.svg)](https://scan.coverity.com/projects/ns1-pktvisor) @@ -19,23 +17,23 @@ ## What is pktvisor? -**pktvisor** (pronounced "packet visor") is an **observability tool** for _summarizing_ high volume, information -overloaded data streams directly at the edge. Its goal is to extract the useful signal from the less useful noise; to -separate the needles from the haystacks as close to the source as possible. This results in lightweight, immediately -actionable observability data at a tiny fraction of the raw data size. +**pktvisor** (pronounced "packet visor") is an **observability tool** for _summarizing_ high volume, information dense +data streams down to lightweight, immediately actionable observability data directly at the edge. Its goal is to extract +the signal from the noise; to separate the needles from the haystacks as close to the source as possible. -It is a resource efficient, side-car style agent built from the ground up to be dynamically controlled in real time via -API. Its output is useful both on-node via command line (for a localized, hyper real-time view) as well as centrally -collected into industry standard observability stacks like Prometheus and Grafana. 
+It is a resource efficient, side-car style agent built from the ground up to be modular and dynamically controlled in +real time via API. Input and processor modules may be dynamically loaded at runtime. Metric output can be visualized +both on-node via command line UI (for a localized, hyper real-time view) +as well as centrally collected into industry standard observability stacks like Prometheus and Grafana. -The modular input stream system is designed to _tap into_ data streams, and currently focuses +The [input stream system](src/inputs) is designed to _tap into_ data streams, and currently focuses on [packet capture](https://en.wikipedia.org/wiki/Packet_analyzer) but will soon support additional taps such as [sFlow](https://en.wikipedia.org/wiki/SFlow) / [Netflow](https://en.wikipedia.org/wiki/NetFlow) , [dnstap](https://dnstap.info/), [envoy taps](https://www.envoyproxy.io/docs/envoy/latest/operations/traffic_tapping), and [eBPF](https://ebpf.io/). -The modular, real-time stream processor includes full application level analysis, and typically summarizes to one minute -buckets of: +The [stream processor system](src/handlers) includes full application level analysis, +and [efficiently](https://en.wikipedia.org/wiki/Streaming_algorithm) summarizes to one minute buckets of: * Counters * Histograms and Quantiles @@ -44,22 +42,22 @@ buckets of: * Set Cardinality * GeoIP -pktvisor has its origins in observability of critical internet infrastructure, including traffic engineering and DDoS -protection. +pktvisor has its origins in observability of critical internet infrastructure in support of DDoS protection, traffic +engineering, and ongoing operations. 
-These screenshots display both the command line and centralized views of -the [Network](https://github.com/ns1/pktvisor/tree/master/src/handlers/net) -and [DNS](https://github.com/ns1/pktvisor/tree/master/src/handlers/dns) stream processors, and the types of summary -information provided: +These screenshots display both the [command line](golang/) and [centralized views](centralized_collection/) of +the [Network](src/handlers/net) +and [DNS](src/handlers/dns) stream processors, and the types of summary information provided: ![Image of CLI UI](docs/images/pktvisor3-cli-ui-screenshot.png) -![Image of Grafana Dash](docs/images/pktvisor3-grafana-screenshot.png) +![Image 1 of Grafana Dash](docs/images/pktvisor-grafana-screenshot1.png) +![Image 2 of Grafana Dash](docs/images/pktvisor-grafana-screenshot2.png) ## Get Started ### Docker -The easiest way to get started with pktvisor is to use +One of the easiest ways to get started with pktvisor is to use the [public docker image](https://hub.docker.com/r/ns1labs/pktvisor). The image contains the collector agent (`pktvisord`), the command line UI (`pktvisor-cli`), and the pcap file analyzer (`pktvisor-pcap`). When running the container, you specify which tool to run. @@ -74,8 +72,8 @@ docker pull ns1labs/pktvisor This will start in the background and stay running. Note that the final two arguments select `pktvisord` agent and the `any` ethernet interface for packet capture. You may substitute `any` for a known interface on your device, such -as `eth0`. _Note that this step requires docker host networking_ to observe traffic outside the container, and that only -Linux supports host networking currently: +as `eth0`. 
_Note that this step requires docker host networking_ to observe traffic outside the container, and +that [currently only Linux supports host networking](https://docs.docker.com/network/host/): ``` docker run --rm --net=host -d ns1labs/pktvisor pktvisord any ``` @@ -85,17 +83,47 @@ docker run --rm --net=host -d ns1labs/pktvisor pktvisord any After the agent is running, you can observe results locally with the included command line UI. This command will run the UI (`pktvisor-cli`) in the foreground, and exit when Ctrl-C is pressed. It connects to the running agent locally using -the built in [REST API](https://app.swaggerhub.com/apis/ns1labs/pktvisor/3.1.0#/). +the built in [REST API](https://app.swaggerhub.com/apis/ns1labs/pktvisor/3.0.0-oas3). + ``` docker run -it --rm --net=host ns1labs/pktvisor pktvisor-cli ``` -### Other Installation Methods + +### Linux Static Binary (AppImage) + +You may also use the Linux static binary, built with [AppImage](https://appimage.org/), which is available for +download [on the Releases page](https://github.com/ns1labs/pktvisor/releases). It is designed to work on all modern +Linux distributions and does not require installation or any other dependencies. + +```shell +curl https://github.com/ns1labs/pktvisor/releases/download/v3.2.0/pktvisor-x86_64-3.2.0.AppImage --output pktvisor-x86_64.AppImage +chmod +x pktvisor-x86_64.AppImage +./pktvisor-x86_64.AppImage pktvisord -h +``` + +The AppImage contains the collector agent (`pktvisord`), the command line UI (`pktvisor-cli`), and the pcap file +analyzer (`pktvisor-pcap`). You can specify which tool to run by passing it as the first argument: + +```shell +./pktvisor-x86_64.AppImage pktvisor-pcap -h +``` + +```shell +./pktvisor-x86_64.AppImage pktvisor-cli -h +``` + +Note that when running the AppImage version of the agent, you may want to use the `-d` argument to daemonize (run in the +background), and either the `--log-file` or `--syslog` argument to record logs. 
+ +Also see [Advanced Agent Example](#advanced-agent-example). + +### Other Platforms + +If you are unable to use the Docker container or the Linux binary, then you will have to build your own executable, +please see the [Build](#build) section below. -There are currently no prebuilt packages besides Docker, _although we are working on additional installation methods_. -If you have a preferred method you would like to see support -for, [please create an issue](https://github.com/ns1/pktvisor/issues/new). Until then, you may build your own -executable, please see the [Build](#build) section below. +If you have a preferred installation method that you would like to see support +for, [please create an issue](https://github.com/ns1/pktvisor/issues/new). ## Docs @@ -109,6 +137,12 @@ Current command line options are described with: docker run --rm ns1labs/pktvisor pktvisord --help ``` +or + +``` +./pktvisor-x86_64.AppImage pktvisord --help +``` + ``` Usage: @@ -120,7 +154,6 @@ docker run --rm ns1labs/pktvisor pktvisord --help IFACE, if specified, is either a network interface or an IP address (4 or 6). If this is specified, a "pcap" input stream will be automatically created, with "net" and "dns" handler modules attached. - ** Note that this is deprecated; you should instead use --admin-api and create the pcap input stream via API. Base Options: -l HOST Run webserver on the given host or IP [default: localhost] @@ -128,16 +161,23 @@ docker run --rm ns1labs/pktvisor pktvisord --help --admin-api Enable admin REST API giving complete control plane functionality [default: false] When not specified, the exposed API is read-only access to summarized metrics. When specified, write access is enabled for all modules. + -d Daemonize; fork and continue running in the background [default: false] -h --help Show this screen -v Verbose log output --no-track Don't send lightweight, anonymous usage metrics. 
--version Show version - --geo-city FILE GeoLite2 City database to use for IP to Geo mapping (if enabled) - --geo-asn FILE GeoLite2 ASN database to use for IP to ASN mapping (if enabled) + --geo-city FILE GeoLite2 City database to use for IP to Geo mapping + --geo-asn FILE GeoLite2 ASN database to use for IP to ASN mapping + Logging Options: + --log-file FILE Log to the given output file name + --syslog Log to syslog + Prometheus Options: + --prometheus Enable native Prometheus metrics at path /metrics + --prom-instance ID Optionally set the 'instance' label to ID Handler Module Defaults: --max-deep-sample N Never deep sample more than N% of streams (an int between 0 and 100) [default: 100] --periods P Hold this many 60 second time periods of history in memory [default: 5] - pcap Input Module Options (deprecated, use admin-api instead): + pcap Input Module Options: -b BPF Filter packets using the given BPF string -H HOSTSPEC Specify subnets (comma separated) to consider HOST, in CIDR form. In live capture this /may/ be detected automatically from capture device but /must/ be specified for pcaps. Example: "10.0.1.0/24,10.0.2.1/32,2001:db8::/64" @@ -148,10 +188,15 @@ docker run --rm ns1labs/pktvisor pktvisord --help ### Command Line UI Usage The command line UI (`pktvisor-cli`) connects directly to a pktvisord agent to visualize the real time stream -summarization, which is by default a sliding 5 minute time window. It can also connect to a remote agent. +summarization, which is by default a sliding 5 minute time window. It can also connect to an agent running on a remote +host. ``` -docker run --rm ns1labs/pktvisor pktvisor-cli --help +docker run --rm ns1labs/pktvisor pktvisor-cli -h +``` + +```shell +./pktvisor-x86_64.AppImage pktvisor-cli -h ``` ``` @@ -177,9 +222,11 @@ Usage: options, and does all of the same analysis, as the live agent version. 
``` - docker run --rm ns1labs/pktvisor pktvisor-pcap --help +``` +```shell +./pktvisor-x86_64.AppImage pktvisor-pcap --help ``` ``` @@ -213,9 +260,40 @@ output will contain the JSON summarization output, which you can capture or pipe $ docker run --rm -v /pktvisor/src/tests/fixtures:/pcaps ns1labs/pktvisor pktvisor-pcap /pcaps/dns_ipv4_udp.pcap | jq . -[2021-03-11 18:45:04.572] [pktvisor] [info] Load input plugin: PcapInputModulePlugin dev.vizer.module.input/1.0 -[2021-03-11 18:45:04.573] [pktvisor] [info] Load handler plugin: DnsHandler dev.vizer.module.handler/1.0 -[2021-03-11 18:45:04.573] [pktvisor] [info] Load handler plugin: NetHandler dev.vizer.module.handler/1.0 +[2021-03-11 18:45:04.572] [pktvisor] [info] Load input plugin: PcapInputModulePlugin dev.visor.module.input/1.0 +[2021-03-11 18:45:04.573] [pktvisor] [info] Load handler plugin: DnsHandler dev.visor.module.handler/1.0 +[2021-03-11 18:45:04.573] [pktvisor] [info] Load handler plugin: NetHandler dev.visor.module.handler/1.0 +... +processed 140 packets +{ + "5m": { + "dns": { + "cardinality": { + "qname": 70 + }, + "period": { + "length": 6, + "start_ts": 1567706414 + }, + "top_nxdomain": [], + "top_qname2": [ + { + "estimate": 140, + "name": ".test.com" + } + ], +... +``` + +The AppImage can access local files as any normal binary: + +``` + +$ ./pktvisor-x86_64.AppImage pktvisor-pcap /pcaps/dns_ipv4_udp.pcap | jq . + +[2021-03-11 18:45:04.572] [pktvisor] [info] Load input plugin: PcapInputModulePlugin dev.visor.module.input/1.0 +[2021-03-11 18:45:04.573] [pktvisor] [info] Load handler plugin: DnsHandler dev.visor.module.handler/1.0 +[2021-03-11 18:45:04.573] [pktvisor] [info] Load handler plugin: NetHandler dev.visor.module.handler/1.0 ... processed 140 packets { @@ -240,6 +318,8 @@ processed 140 packets ### Metrics Collection +#### Metrics from the REST API + The metrics are available from the agent in JSON format via the [REST API](#rest-api). 
For most use cases, you will want to collect the most recent full 1-minute bucket, once per minute: @@ -273,19 +353,39 @@ interval = "60" ``` -#### Prometheus +#### Prometheus Metrics + +`pktvisord` also has native Prometheus support, which you can enable by passing `--prometheus`. The metrics are +available for collection at the standard `/metrics` endpoint. + +```shell +$ ./pktvisor-x86_64.AppImage pktvisord -d --prometheus eth0 +$ curl localhost:10853/metrics +# HELP dns_wire_packets_udp Total DNS wire packets received over UDP (ingress and egress) +# TYPE dns_wire_packets_udp gauge +dns_wire_packets_udp{instance="node"} 28 +# HELP dns_rates_total Rate of all DNS wire packets (combined ingress and egress) per second +# TYPE dns_rates_total summary +dns_rates_total{instance="node",quantile="0.5"} 0 +dns_rates_total{instance="node",quantile="0.9"} 4 +dns_rates_total{instance="node",quantile="0.95"} 4 +... +``` + +You can set the `instance` label by passing `--prom-instance ID` -`pktvisord` will have native Prometheus support in version 3.2.0. Until -then, [an adapter is available](https://github.com/ns1/pktvisor/tree/master/reporting/pktvisor_prometheus) in the -repository. +If you are interested in centralized collection +using [remote write](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage), including to +cloud providers, there is a [docker image available](https://hub.docker.com/r/ns1labs/pktvisor-prom-write) to make this +easy. See [centralized_collection/prometheus](centralized_collection/prometheus) for more. 
### REST API -REST API documentation, including a description of the metrics that are available, is available -in [OpenAPI Format](https://app.swaggerhub.com/apis/ns1labs/pktvisor/3.1.0#/) +REST API documentation is available in [OpenAPI Format](https://app.swaggerhub.com/apis/ns1labs/pktvisor/3.0.0-oas3) -Please note that the administration control plane API is currently undergoing heavy iteration and so is not yet -documented. If you have a use case that requires the administration API, please [contact us](#contact-us) to discuss. +Please note that the administration control plane API (`--admin-api`) is currently undergoing heavy iteration and so is +not yet documented. If you have a use case that requires the administration API, please [contact us](#contact-us) to +discuss. ### Advanced Agent Example @@ -302,19 +402,33 @@ docker run --rm --net=host -d \ eth0 ``` +The same command with AppImage and logging to syslog: + +``` +./pktvisor-x86_64.AppImage pktvisord -d --syslog \ + --geo-city /geo/GeoIP2-City.mmdb \ + --geo-asn /geo/GeoIP2-ISP.mmdb \ + -H 192.168.0.54/32,127.0.0.1/32 \ + eth0 +``` + ### Further Documentation -We recognize the value of first class documentation, and this section is being expanded. +We recognize the value of first class documentation, and we are working on further documentation including expanded and +updated REST API documentation, internal documentation for developers of input and handler modules (and those who want +to contribute to pktvisor), and a user manual. + Please [contact us](#contact-us) if you have any questions on installation, use, or development. ## Contact Us We are _very_ interested in hearing about your use cases, feature requests, and other feedback! 
-* [File an issue](https://github.com/ns1/pktvisor/issues/new) -* Use our [public feature board](https://github.com/ns1/pktvisor/projects/1) -* Start a [Discussion](https://github.com/ns1/pktvisor/discussions) -* [Join us on Slack](https://join.slack.com/t/getorb/shared_invite/zt-nn4joou9-71Bp3HkubYf5Adh9c4cDNw) +* [File an issue](https://github.com/ns1labs/pktvisor/issues/new) +* Use our [public work board](https://github.com/ns1labs/pktvisor/projects/1) +* Use our [public backlog board](https://github.com/ns1labs/pktvisor/projects/2) +* Start a [Discussion](https://github.com/ns1labs/pktvisor/discussions) +* [Join us on Slack](https://join.slack.com/t/ns1labs/shared_invite/zt-p0uzy9zq-ZgD~QkKQ9cWMSiI4DgJSaA) * Send mail to [info@pktvisor.dev](mailto:info@pktvisor.dev) ## Build @@ -324,16 +438,19 @@ build system requires CMake and the [Conan](https://conan.io/) package manager s pktvisor adheres to [semantic versioning](https://semver.org/). +pktvisor is developed and tested on Linux and OSX. Windows is not yet officially supported, though the dependencies and +code base do not preclude a Windows build. If you are interested in developing a Windows version, +please [contact us](#contact-us). + #### Dependencies -* Linux or OSX * [Conan](https://conan.io/) C++ package manager * CMake >= 3.13 (`cmake`) * C++ compiler supporting C++17 -* MaxMind DB (`libmaxmindb-dev`) -* [PcapPlusPlus](https://github.com/ns1/PcapPlusPlus) (NS1 fork) -In addition, debugging integration tests requires: +For the list of packages included by conan, see [conanfile.txt](conanfile.txt) + +In addition, debugging integration tests make use of: * [jq](https://stedolan.github.io/jq/) * [graphtage](https://github.com/trailofbits/graphtage) @@ -343,18 +460,28 @@ In addition, debugging integration tests requires: The general build steps are: ``` -$ git clone https://github.com/ns1/pktvisor.git -$ cd pktvisor -$ mkdir build && cd build -$ conan install .. -$ cmake .. 
-$ make all test -$ bin/pktvisord --help +# clone the repository +git clone https://github.com/ns1labs/pktvisor.git +cd pktvisor +mkdir build && cd build + +# set up conan +conan profile update settings.compiler.libcxx=libstdc++11 default +conan config set general.revisions_enabled=1 + +# configure and handle dependencies +cmake -DCMAKE_BUILD_TYPE=Release .. + +# build and run tests +make all test + +# the binaries will be in the build/bin directory +bin/pktvisord --help ``` As development environments can vary widely, please see -the [Dockerfile](https://github.com/ns1/pktvisor/blob/master/docker/Dockerfile) -and [Continuous Integration build file](https://github.com/ns1/pktvisor/blob/master/.github/workflows/cmake.yml) for +the [Dockerfile](https://github.com/ns1labs/pktvisor/blob/master/docker/Dockerfile) +and [Continuous Integration build file](https://github.com/ns1labs/pktvisor/blob/master/.github/workflows/cmake.yml) for reference. ## Contribute diff --git a/appimage/Dockerfile.part b/appimage/Dockerfile.part new file mode 100644 index 000000000..a162f194f --- /dev/null +++ b/appimage/Dockerfile.part @@ -0,0 +1,18 @@ +# file is needed for appimagetool when its run with --appimage-extract-and-run +# binutil is needed for strip +RUN apt-get update && apt-get install -yqq --no-install-recommends python3-pip binutils file dietlibc-dev gcc musl musl-tools \ + && pip3 install --user exodus-bundler --no-warn-script-location \ + && /root/.local/bin/exodus --verbose --tarball --output=/tmp/pktvisor.tgz $(which pktvisord) $(which pktvisor-pcap) \ + && mkdir pktvisor \ + && tar --strip=1 -xf /tmp/pktvisor.tgz -C pktvisor/ \ + && strip --verbose --strip-debug pktvisor/data/* \ + && cp $(which pktvisor-cli) /pktvisor/bin/pktvisor-cli + +# get latest appimagetool +ADD https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage /bin/appimage-tool + +# add entrypoints and desktop things +COPY appimage/pktvisor /pktvisor + +# create 
appimage without fuse +RUN chmod +x /bin/appimage-tool /pktvisor/AppRun && appimage-tool --appimage-extract-and-run /pktvisor/ \ No newline at end of file diff --git a/appimage/Makefile b/appimage/Makefile new file mode 100644 index 000000000..085a7079c --- /dev/null +++ b/appimage/Makefile @@ -0,0 +1,17 @@ +DEV_IMAGE ?= ns1labs/pktvisor:latest + +# in dev mode we just use the latest image as the start point +ifneq ($(strip $(DEV_MODE)),) +DOCKERFILE_HEAD = <(echo "FROM $(DEV_IMAGE)") +else +DOCKERFILE_HEAD = docker/Dockerfile +endif + +pktvisor-x86_64.AppImage: SHELL:=/bin/bash #needed for the fd when DOCKERFILE_HEAD is in DEV_MODE +pktvisor-x86_64.AppImage: + ID=$$(cd .. && cat $(DOCKERFILE_HEAD) appimage/Dockerfile.part | docker build -q -f- .) ; \ + bash export.sh $$ID + +.PHONEY: clean +clean: + rm pktvisor-x86_64.AppImage \ No newline at end of file diff --git a/appimage/README.md b/appimage/README.md new file mode 100644 index 000000000..b981cd4e3 --- /dev/null +++ b/appimage/README.md @@ -0,0 +1,28 @@ +AppImage Packaging +================== + +Creates an AppImage that contains `pktvisord` `pktvisor-pcap` and `pktvisor-cli`. This implementation builds the AppImage in a docker using the assets generated by the docker [build](../docker). Because not all of the assets are statically linked we use [Exodus](https://github.com/intoli/exodus) to capture a minimal set of dependencies for the AppImage + + +## Build: +`make pktvisor-x86_64.AppImage` + +## Development: +Because the build can take a while you may want to build the appimage from the latest docker image on docker hub. To do this you can set the `DEV_MODE=` environment to anything. Like so: + +`DEV_MODE=t make pktvisor-x86_64.AppImage` + +You may also specify a custom image in dev mode by setting the env file `DEV_IMAGE=` to the image you wish to build on. 
Example: + +`DEV_IMAGE="ns1labs/pktvisor:develop" DEV_MODE=t make pktvisor-x86_64.AppImage` + + +## Usage: +To use the AppImage from the command line you specify the binary you want to run as the first argument following the +pattern: + +` +./pktvisor-x86_64.AppImage pktvisord|pktvisor-pcap|pktvisor-cli +` + + diff --git a/appimage/export.sh b/appimage/export.sh new file mode 100644 index 000000000..00c27ff24 --- /dev/null +++ b/appimage/export.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +## +# Try to extract files from docker image as atomically as possible +# + +FILES=(pktvisor-x86_64.AppImage) + +die () { + echo "$@" >&2 + exit 1 +} + +# pass in image id as $1 +main () { + [[ $1 ]] || die "image name not specified" + + # make a trap that see the var + id= + cleanup() { + docker rm -v "$id" + } + + trap cleanup EXIT + + # make it + id=$(docker create $1) + [[ $? == 0 ]] || die "failed to create container for export" + + # take it + for file in "${FILES[@]}" ; do + docker cp "$id:$file" . + done +} + +main "$1" \ No newline at end of file diff --git a/appimage/pktvisor/AppRun b/appimage/pktvisor/AppRun new file mode 100644 index 000000000..b21a57ea4 --- /dev/null +++ b/appimage/pktvisor/AppRun @@ -0,0 +1,36 @@ +#!/bin/sh + +# borrowed from appimage directly +SELF=$(readlink -f "$0") +HERE=${SELF%/*} +export PATH="${HERE}/usr/bin/:${HERE}/usr/sbin/:${HERE}/usr/games/:${HERE}/bin/:${HERE}/sbin/${PATH:+:$PATH}" +export LD_LIBRARY_PATH="${HERE}/usr/lib/:${HERE}/usr/lib/i386-linux-gnu/:${HERE}/usr/lib/x86_64-linux-gnu/:${HERE}/usr/lib32/:${HERE}/usr/lib64/:${HERE}/lib/:${HERE}/lib/i386-linux-gnu/:${HERE}/lib/x86_64-linux-gnu/:${HERE}/lib32/:${HERE}/lib64/${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" +export PYTHONPATH="${HERE}/usr/share/pyshared/${PYTHONPATH:+:$PYTHONPATH}" +export XDG_DATA_DIRS="${HERE}/usr/share/${XDG_DATA_DIRS:+:$XDG_DATA_DIRS}" +export PERLLIB="${HERE}/usr/share/perl5/:${HERE}/usr/lib/perl5/${PERLLIB:+:$PERLLIB}" +export 
GSETTINGS_SCHEMA_DIR="${HERE}/usr/share/glib-2.0/schemas/${GSETTINGS_SCHEMA_DIR:+:$GSETTINGS_SCHEMA_DIR}" +export QT_PLUGIN_PATH="${HERE}/usr/lib/qt4/plugins/:${HERE}/usr/lib/i386-linux-gnu/qt4/plugins/:${HERE}/usr/lib/x86_64-linux-gnu/qt4/plugins/:${HERE}/usr/lib32/qt4/plugins/:${HERE}/usr/lib64/qt4/plugins/:${HERE}/usr/lib/qt5/plugins/:${HERE}/usr/lib/i386-linux-gnu/qt5/plugins/:${HERE}/usr/lib/x86_64-linux-gnu/qt5/plugins/:${HERE}/usr/lib32/qt5/plugins/:${HERE}/usr/lib64/qt5/plugins/${QT_PLUGIN_PATH:+:$QT_PLUGIN_PATH}" + +if [ $# -eq 0 ]; then + echo "No arguments provided: specify either 'pktvisor-cli', 'pktvisor-pcap' or 'pktvisord'. Try:" + echo "pktvisor-cli -h" + echo "pktvisor-pcap --help" + echo "pktvisord --help" + exit 1 +fi + +# begin entrypoint +BINARY="$1" +case "$BINARY" in +pktvisor-cli) shift ;; +pktvisor-pcap) shift ;; +pktvisord) shift ;; +esac + +# not a terminal try to alert the user +if [ ! -t 1 ]; then + xdg-open "file://${HERE}/TerminalEmulatorRequired.txt" + exit 1 +else + exec "${HERE}/bin/${BINARY}" "$@" +fi diff --git a/appimage/pktvisor/TerminalEmulatorRequired.txt b/appimage/pktvisor/TerminalEmulatorRequired.txt new file mode 100644 index 000000000..7f0d9f302 --- /dev/null +++ b/appimage/pktvisor/TerminalEmulatorRequired.txt @@ -0,0 +1 @@ +This app needs to be run from a terminal to function correctly. See the docs at https://github.com/ns1labs/pktvisor for more details. 
\ No newline at end of file diff --git a/appimage/pktvisor/pktvisor.desktop b/appimage/pktvisor/pktvisor.desktop new file mode 100644 index 000000000..35a4e20b4 --- /dev/null +++ b/appimage/pktvisor/pktvisor.desktop @@ -0,0 +1,7 @@ +[Desktop Entry] +Name=pktvisor +Terminal=true +Exec=AppRun +Icon=pktvisor +Type=Application +Categories=Utility; \ No newline at end of file diff --git a/appimage/pktvisor/pktvisor.png b/appimage/pktvisor/pktvisor.png new file mode 100644 index 000000000..b5a78ad08 Binary files /dev/null and b/appimage/pktvisor/pktvisor.png differ diff --git a/centralized_collection/README.md b/centralized_collection/README.md new file mode 100644 index 000000000..694c5e4aa --- /dev/null +++ b/centralized_collection/README.md @@ -0,0 +1,11 @@ +# Centralized Collection + +This directory contains resources for centrally collecting and visualizing the metrics from pktvisor. + +Because pktvisor exposes its metrics over a generic JSON interface, it is able to work with any time series or +relational database. This directory contains resources for interacting with common databases. + +See the individual READMEs for more information: + +* [Prometheus](prometheus/README.md) +* [Elasticsearch](elastic/README.md) diff --git a/centralized_collection/elastic/README.md b/centralized_collection/elastic/README.md new file mode 100644 index 000000000..f26eb7337 --- /dev/null +++ b/centralized_collection/elastic/README.md @@ -0,0 +1,6 @@ +# Centralized Elasticsearch Collection + +This directory contains resources for building a docker container that contains pktvisord and +the [telegraf](https://github.com/influxdata/telegraf) for collecting and sending metrics to Elasticsearch. + +It also contains an example [Grafana dashboard](grafana-dashboard-elk.json). 
diff --git a/reporting/docker/README.md b/centralized_collection/elastic/docker/README.md similarity index 100% rename from reporting/docker/README.md rename to centralized_collection/elastic/docker/README.md diff --git a/reporting/docker/config/telegraf-pop1.conf b/centralized_collection/elastic/docker/config/telegraf-pop1.conf similarity index 100% rename from reporting/docker/config/telegraf-pop1.conf rename to centralized_collection/elastic/docker/config/telegraf-pop1.conf diff --git a/reporting/docker/config/telegraf-pop2.conf b/centralized_collection/elastic/docker/config/telegraf-pop2.conf similarity index 100% rename from reporting/docker/config/telegraf-pop2.conf rename to centralized_collection/elastic/docker/config/telegraf-pop2.conf diff --git a/reporting/docker/datasource.json b/centralized_collection/elastic/docker/datasource.json similarity index 100% rename from reporting/docker/datasource.json rename to centralized_collection/elastic/docker/datasource.json diff --git a/reporting/docker/docker-compose.yml b/centralized_collection/elastic/docker/docker-compose.yml similarity index 100% rename from reporting/docker/docker-compose.yml rename to centralized_collection/elastic/docker/docker-compose.yml diff --git a/reporting/docker/setup.sh b/centralized_collection/elastic/docker/setup.sh similarity index 100% rename from reporting/docker/setup.sh rename to centralized_collection/elastic/docker/setup.sh diff --git a/reporting/docker/with_telegraf/Dockerfile b/centralized_collection/elastic/docker/with_telegraf/Dockerfile similarity index 89% rename from reporting/docker/with_telegraf/Dockerfile rename to centralized_collection/elastic/docker/with_telegraf/Dockerfile index 71902c2e8..d7c64d9a1 100644 --- a/reporting/docker/with_telegraf/Dockerfile +++ b/centralized_collection/elastic/docker/with_telegraf/Dockerfile @@ -1,6 +1,7 @@ +ARG PKTVISOR_TAG=latest FROM telegraf:1.16.2 as telegraf -FROM ns1labs/pktvisor:3.0.7 +FROM ns1labs/pktvisor:${PKTVISOR_TAG} 
COPY --from=telegraf /usr/bin/telegraf /usr/local/bin/telegraf diff --git a/reporting/docker/with_telegraf/misc/entrypoint b/centralized_collection/elastic/docker/with_telegraf/misc/entrypoint similarity index 100% rename from reporting/docker/with_telegraf/misc/entrypoint rename to centralized_collection/elastic/docker/with_telegraf/misc/entrypoint diff --git a/reporting/docker/with_telegraf/misc/run-dig.sh b/centralized_collection/elastic/docker/with_telegraf/misc/run-dig.sh similarity index 100% rename from reporting/docker/with_telegraf/misc/run-dig.sh rename to centralized_collection/elastic/docker/with_telegraf/misc/run-dig.sh diff --git a/reporting/docker/with_telegraf/misc/run-pktvisord.sh b/centralized_collection/elastic/docker/with_telegraf/misc/run-pktvisord.sh similarity index 100% rename from reporting/docker/with_telegraf/misc/run-pktvisord.sh rename to centralized_collection/elastic/docker/with_telegraf/misc/run-pktvisord.sh diff --git a/reporting/docker/with_telegraf/misc/run-telegraf.sh b/centralized_collection/elastic/docker/with_telegraf/misc/run-telegraf.sh similarity index 100% rename from reporting/docker/with_telegraf/misc/run-telegraf.sh rename to centralized_collection/elastic/docker/with_telegraf/misc/run-telegraf.sh diff --git a/reporting/grafana-dashboard.json b/centralized_collection/elastic/grafana-dashboard-elk.json similarity index 100% rename from reporting/grafana-dashboard.json rename to centralized_collection/elastic/grafana-dashboard-elk.json diff --git a/reporting/top_n.elk b/centralized_collection/elastic/top_n.elk similarity index 100% rename from reporting/top_n.elk rename to centralized_collection/elastic/top_n.elk diff --git a/centralized_collection/prometheus/README.md b/centralized_collection/prometheus/README.md new file mode 100644 index 000000000..15f83d8a8 --- /dev/null +++ b/centralized_collection/prometheus/README.md @@ -0,0 +1,44 @@ +# Centralized Prometheus Collection + +This directory contains resources for 
building a docker container aiding centralized prometheus collection. It is +published to Docker hub at https://hub.docker.com/r/ns1labs/pktvisor-prom-write + +It combines pktvisord with the [Grafana Agent](https://github.com/grafana/agent) for collecting and sending metrics to +Prometheus through +[remote write](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage), including to cloud +providers like [Grafana Cloud](https://grafana.com/products/cloud/). + +There is a sample [Grafana dashboard](grafana-dashboard-prometheus.json) which provides a good starting point for +visualizing pktvisor metrics. You can also find it online via +the [Grafana community dashboards](https://grafana.com/grafana/dashboards/14221), allowing you to import easily into any +Grafana installation (ID 14221). + +Example: + +```shell +docker pull ns1labs/pktvisor-prom-write +docker run -d --net=host --env PKTVISORD_ARGS="--prom-instance " \ +--env REMOTE_URL="https:///api/prom/push" --env USERNAME="" \ +--env PASSWORD="" ns1labs/pktvisor-prom-write +``` + +Example with Geo enabled (assuming files are located in `/usr/local/geo`): + +```shell +docker pull ns1labs/pktvisor-prom-write +docker run -d --mount type=bind,source=/usr/local/geo,target=/geo --net=host --env \ +PKTVISORD_ARGS="--prom-instance --geo-city /geo/GeoIP2-City.mmdb --geo-asn /geo/GeoIP2-ISP.mmdb " \ +--env REMOTE_URL="https:///api/prom/push" --env USERNAME="" --env PASSWORD="" ns1labs/pktvisor-prom-write +``` + +There are a several pieces of information you need to substitute above: + +* ``: The prometheus "instance" label for all metrics, e.g. "myhost" +* ``: The ethernet interface to capture on, e.g. "eth0" +* ``: The remote host to remote_write the prometheus metric to +* ``: If required by your prometheus setup, the user name to connect. If not required, leave off this + environment variable. +* ``: If required by your prometheus setup, the password to connect. 
If not required, leave off this + environment variable. + +Other pktvisor arguments may be passed in the PKTVISORD_ARGS environment variable. diff --git a/centralized_collection/prometheus/docker-grafana-agent/Dockerfile b/centralized_collection/prometheus/docker-grafana-agent/Dockerfile new file mode 100644 index 000000000..9dae75965 --- /dev/null +++ b/centralized_collection/prometheus/docker-grafana-agent/Dockerfile @@ -0,0 +1,20 @@ +ARG PKTVISOR_TAG=latest +FROM grafana/agent:latest as agent + +FROM ns1labs/pktvisor:${PKTVISOR_TAG} + +COPY --from=agent /bin/agent /usr/local/bin/agent + +RUN apt-get update \ + && apt-get install -y runit-init \ + && rm -rf /var/lib/apt \ + && mkdir -p /etc/runit/ \ + && mkdir -p /etc/agent/ \ + && mkdir -p /etc/agent/data \ + && rm -rf /etc/service/* + +COPY files/run-grafana-agent.sh /etc/service/agent/run +COPY files/run-pktvisord.sh /etc/service/pktvisord/run +COPY files/entrypoint /usr/local/bin/entrypoint + +ENTRYPOINT /usr/local/bin/entrypoint diff --git a/centralized_collection/prometheus/docker-grafana-agent/files/entrypoint b/centralized_collection/prometheus/docker-grafana-agent/files/entrypoint new file mode 100755 index 000000000..9f2bc7b5c --- /dev/null +++ b/centralized_collection/prometheus/docker-grafana-agent/files/entrypoint @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +( + cat </etc/agent/agent.yaml + +exec runsvdir /etc/service/ diff --git a/centralized_collection/prometheus/docker-grafana-agent/files/run-grafana-agent.sh b/centralized_collection/prometheus/docker-grafana-agent/files/run-grafana-agent.sh new file mode 100755 index 000000000..30cc27256 --- /dev/null +++ b/centralized_collection/prometheus/docker-grafana-agent/files/run-grafana-agent.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +exec agent --config.file=/etc/agent/agent.yaml --prometheus.wal-directory=/etc/agent/data diff --git a/centralized_collection/prometheus/docker-grafana-agent/files/run-pktvisord.sh 
b/centralized_collection/prometheus/docker-grafana-agent/files/run-pktvisord.sh new file mode 100755 index 000000000..aae60c15c --- /dev/null +++ b/centralized_collection/prometheus/docker-grafana-agent/files/run-pktvisord.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +[[ "$PKTVISORD_ARGS" == "" ]] && PKTVISORD_ARGS="eth0" + +exec pktvisord --prometheus $PKTVISORD_ARGS diff --git a/centralized_collection/prometheus/grafana-dashboard-prometheus.json b/centralized_collection/prometheus/grafana-dashboard-prometheus.json new file mode 100644 index 000000000..dff0862fb --- /dev/null +++ b/centralized_collection/prometheus/grafana-dashboard-prometheus.json @@ -0,0 +1,3101 @@ +{ + "__inputs": [ + { + "name": "DS_GRAFANACLOUD-NS1RD81-PROM", + "label": "grafanacloud-ns1rd81-prom", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "panel", + "id": "gauge", + "name": "Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "7.5.3" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "panel", + "id": "piechart", + "name": "Pie chart v2", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "gnetId": null, + "graphTooltip": 1, + "id": null, + "iteration": 1618345430814, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 4, + "panels": [], + "title": "Network", + "type": "row" + }, + { + 
"datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 19, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "displayLabels": [ + "percent" + ], + "legend": { + "displayMode": "hidden", + "placement": "right", + "values": [] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "sum" + ], + "fields": "", + "values": false + }, + "text": {} + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "packets_ipv4{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "IPv4", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "packets_ipv6{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "IPv6", + "queryType": "randomWalk", + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "L3 Protocols", + "type": "piechart" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 3, + "y": 1 + }, + "id": 20, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "displayLabels": [ + "percent" + ], + "legend": { + "displayMode": "hidden", + "placement": "right", + "values": [] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "sum" + ], + "fields": "", + "values": false + }, + "text": {} + }, + "pluginVersion": "7.5.3", + "targets": [ + { + 
"exemplar": true, + "expr": "packets_tcp{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "TCP", + "queryType": "randomWalk", + "refId": "C" + }, + { + "exemplar": true, + "expr": "packets_udp{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "UDP", + "queryType": "randomWalk", + "refId": "D" + }, + { + "exemplar": true, + "expr": "packets_other_l4{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "Other L4", + "queryType": "randomWalk", + "refId": "E" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "L4 Protocols", + "type": "piechart" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 12, + "y": 1 + }, + "id": 21, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^sample_rate$/", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "packets_deep_samples{instance=~\"$instance\"}/packets_total{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "sample_rate", + "queryType": "randomWalk", + "refId": "C" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Deep Inspection", + "type": "gauge" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + 
"unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 15, + "y": 1 + }, + "id": 22, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^packets_rates_pps_total\\{instance=\"gw\", job=\"pktvisor\", quantile=\"0\\.95\"\\}$/", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "packets_rates_pps_total{instance=~\"$instance\",quantile=\"0.95\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "C" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Avg Rate p95", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 2, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:360", + "alias": "p99 In", + "lines": false, + "pointradius": 1, + "points": true + }, + { + "$$hashKey": "object:394", + "alias": "/Out$/", + "color": "#8AB8FF", + "transform": "negative-Y" + }, + { + "$$hashKey": "object:401", + "alias": "p99 Out", + "lines": false, + "pointradius": 1, + "points": true + }, + { + "$$hashKey": "object:451", + "alias": "/In$/", + "color": "#56A64B" + } + ], + "spaceLength": 10, + "stack": 
false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "packets_rates_pps_in{instance=~\"$instance\",quantile=\"0.9\"}", + "interval": "", + "legendFormat": "p90 In", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "packets_rates_pps_in{instance=~\"$instance\",quantile=\"0.99\"}", + "hide": false, + "interval": "", + "legendFormat": "p99 In", + "queryType": "randomWalk", + "refId": "B" + }, + { + "exemplar": true, + "expr": "packets_rates_pps_out{instance=~\"$instance\",quantile=\"0.9\"}", + "hide": false, + "interval": "", + "legendFormat": "p90 Out", + "queryType": "randomWalk", + "refId": "C" + }, + { + "exemplar": true, + "expr": "packets_rates_pps_out{instance=~\"$instance\",quantile=\"0.99\"}", + "hide": false, + "interval": "", + "legendFormat": "p99 Out", + "queryType": "randomWalk", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Packet In/Out", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 8 + }, + "hiddenSeries": false, + "id": 7, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + 
"show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "packets_ipv4{instance=~\"$instance\"}/60", + "interval": "", + "legendFormat": "IPv4", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "packets_ipv6{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "IPv6", + "queryType": "randomWalk", + "refId": "B" + }, + { + "exemplar": true, + "expr": "packets_tcp{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "TCP", + "queryType": "randomWalk", + "refId": "C" + }, + { + "exemplar": true, + "expr": "packets_udp{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "UDP", + "queryType": "randomWalk", + "refId": "D" + }, + { + "exemplar": true, + "expr": "packets_other_l4{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "Other L4", + "queryType": "randomWalk", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "L3/L4 Protocols", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { 
+ "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 15 + }, + "hiddenSeries": false, + "id": 11, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "packets_top_geoLoc{instance=~\"$instance\"}/60", + "format": "time_series", + "interval": "", + "legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "packets_top_ASN{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "ASN {{name}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top Geo IP/ASN", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:334", + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:335", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + 
"defaults": { + "color": { + "mode": "continuous-RdYlGr" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 12, + "y": 15 + }, + "id": 15, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Packets (sum)" + } + ] + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "sum by (name) (packets_top_ipv4{instance=~\"$instance\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top IPv4", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "Packets", + "name": "IP" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "IP": { + "aggregations": [], + "operation": "groupby" + }, + "Packets": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + } + ], + "type": "table" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-RdYlGr" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Packets (sum)" + }, + "properties": [ + { + "id": "custom.width", + "value": 255 + } + ] + } + ] + }, + "gridPos": { 
+ "h": 7, + "w": 7, + "x": 17, + "y": 15 + }, + "id": 16, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Packets (sum)" + } + ] + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "sum by (name) (packets_top_geoLoc{instance=~\"$instance\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top Geo", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "Packets", + "name": "Geo" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "Geo": { + "aggregations": [], + "operation": "groupby" + }, + "Packets": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + } + ], + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "unit": "pps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 22 + }, + "hiddenSeries": false, + "id": 9, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "packets_top_ipv4{instance=~\"$instance\"}/60", + "format": "time_series", + "interval": "", + 
"legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "packets_top_ipv6{instance=~\"$instance\"}/60", + "format": "time_series", + "hide": false, + "interval": "", + "legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top IPs", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-RdYlGr" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 12, + "y": 22 + }, + "id": 18, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Packets (sum)" + } + ] + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "sum by (name) (packets_top_ipv6{instance=~\"$instance\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top IPv6", + "transformations": [ + { + "id": "organize", + "options": { + 
"excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "Packets", + "name": "IP" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "IP": { + "aggregations": [], + "operation": "groupby" + }, + "Packets": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + } + ], + "type": "table" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-RdYlGr" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 17, + "y": 22 + }, + "id": 17, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Packets (sum)" + } + ] + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "sum by (name) (packets_top_ASN{instance=~\"$instance\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top ASN", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "Packets", + "name": "Geo" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "Geo": { + "aggregations": [], + "operation": "groupby" + }, + "Packets": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 6, + "panels": [], + 
"title": "DNS", + "type": "row" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 0, + "y": 30 + }, + "id": 23, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^sample_rate$/", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "dns_wire_packets_deep_samples{instance=~\"$instance\"}/dns_wire_packets_total{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "sample_rate", + "queryType": "randomWalk", + "refId": "C" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Deep Inspection", + "type": "gauge" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 3, + "y": 30 + }, + "id": 24, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^dns_rates_total\\{instance=\"gw\", job=\"pktvisor\", quantile=\"0\\.95\"\\}$/", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "dns_rates_total{instance=~\"$instance\",quantile=\"0.95\"}", + "hide": false, + "interval": "", + 
"legendFormat": "", + "queryType": "randomWalk", + "refId": "C" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Avg Rate p95", + "type": "gauge" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 12, + "y": 30 + }, + "id": 37, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "displayLabels": [], + "legend": { + "displayMode": "hidden", + "placement": "right", + "values": [] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "sum" + ], + "fields": "", + "values": false + }, + "text": {} + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "dns_top_qtype{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "C" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "QTypes", + "type": "piechart" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 15, + "y": 30 + }, + "id": 38, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "displayLabels": [], + "legend": { + "displayMode": "hidden", + "placement": "right", + "values": [] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "sum" + ], + "fields": "", + "values": false + }, + "text": {} + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "dns_top_rcode{instance=~\"$instance\"}", + "hide": 
false, + "interval": "", + "legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "C" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Result Codes", + "type": "piechart" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 37 + }, + "hiddenSeries": false, + "id": 14, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:584", + "alias": "p99", + "color": "#96D98D", + "lines": false, + "pointradius": 1, + "points": true + }, + { + "$$hashKey": "object:605", + "alias": "p90", + "color": "#37872D" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "dns_rates_total{instance=~\"$instance\",quantile=\"0.9\"}", + "interval": "", + "legendFormat": "p90", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "dns_rates_total{instance=~\"$instance\",quantile=\"0.99\"}", + "hide": false, + "interval": "", + "legendFormat": "p99", + "queryType": "randomWalk", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNS Packets (In+Out)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"$$hashKey": "object:281", + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 37 + }, + "hiddenSeries": false, + "id": 25, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "dns_wire_packets_ipv4{instance=~\"$instance\"}/60", + "interval": "", + "legendFormat": "IPv4", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "dns_wire_packets_ipv6{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "IPv6", + "queryType": "randomWalk", + "refId": "B" + }, + { + "exemplar": true, + "expr": "dns_wire_packets_tcp{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "TCP", + "queryType": "randomWalk", + "refId": "C" + }, + { + "exemplar": true, + "expr": "dns_wire_packets_udp{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "UDP", + "queryType": "randomWalk", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + 
"timeRegions": [], + "timeShift": null, + "title": "DNS Protocols", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 44 + }, + "hiddenSeries": false, + "id": 27, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:371", + "alias": "Xact Out (remote server)", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "dns_xact_in_total{instance=~\"$instance\"}/60", + "interval": "", + "legendFormat": "Xact In (local server)", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "dns_xact_out_total{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "Xact Out (remote server)", + "queryType": "randomWalk", + "refId": "B" + } + ], + 
"thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNS Transactions", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 44 + }, + "hiddenSeries": false, + "id": 26, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "dns_wire_packets_nxdomain{instance=~\"$instance\"}/60", + "interval": "", + "legendFormat": "NXDOMAIN", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "dns_wire_packets_srvfail{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "SRVFAIL", + "queryType": "randomWalk", + "refId": "C" + }, + { + "exemplar": true, + "expr": "dns_wire_packets_refused{instance=~\"$instance\"}/60", + 
"hide": false, + "interval": "", + "legendFormat": "REFUSED", + "queryType": "randomWalk", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNS Errors", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 51 + }, + "hiddenSeries": false, + "id": 30, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "dns_top_qname2{instance=~\"$instance\"}/60", + "interval": "", + "legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "dns_top_qname3{instance=~\"$instance\"}/60", + "hide": false, + "interval": "", + "legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "B" + } + 
], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top DNS Names", + "tooltip": { + "shared": false, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-RdYlGr" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": true + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 12, + "y": 51 + }, + "id": 28, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Requests (sum)" + } + ] + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "sum by (name) (dns_top_qname2{instance=~\"$instance\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top QName2", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "Requests", + "name": "Name" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "IP": { + "aggregations": [], + 
"operation": "groupby" + }, + "Name": { + "aggregations": [], + "operation": "groupby" + }, + "Packets": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + }, + "Requests": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + } + ], + "type": "table" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-RdYlGr" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": true + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 17, + "y": 51 + }, + "id": 35, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Requests (sum)" + } + ] + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "sum by (name) (dns_top_qname2{instance=~\"$instance\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top QName2", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "Requests", + "name": "Name" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "IP": { + "aggregations": [], + "operation": "groupby" + }, + "Name": { + "aggregations": [], + "operation": "groupby" + }, + "Packets": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + }, + "Requests": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + } + ], + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 58 + }, + "hiddenSeries": false, + "id": 31, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "dns_top_rcode{instance=~\"$instance\"}/60", + "interval": "", + "legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top DNS RCodes", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-RdYlGr" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": true + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + 
"value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 12, + "y": 58 + }, + "id": 29, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Requests (sum)" + } + ] + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + "expr": "sum by (name) (dns_top_rcode{instance=~\"$instance\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top RCodes", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "Requests", + "name": "Name" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "IP": { + "aggregations": [], + "operation": "groupby" + }, + "Name": { + "aggregations": [], + "operation": "groupby" + }, + "Packets": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + }, + "Requests": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + } + ], + "type": "table" + }, + { + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-RdYlGr" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": true + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "dark-red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 17, + "y": 58 + }, + "id": 36, + "interval": "1m", + "maxDataPoints": 180, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Requests (sum)" + } + ] + }, + "pluginVersion": "7.5.3", + "targets": [ + { + "exemplar": true, + 
"expr": "sum by (name) (dns_top_qtype{instance=~\"$instance\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Top QTypes", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "Requests", + "name": "Name" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "IP": { + "aggregations": [], + "operation": "groupby" + }, + "Name": { + "aggregations": [], + "operation": "groupby" + }, + "Packets": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + }, + "Requests": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + } + ], + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 64 + }, + "hiddenSeries": false, + "id": 34, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:621", + "alias": "Out p95", + "transform": "negative-Y" + }, + { + "$$hashKey": "object:628", + "alias": "Out p99", + "lines": false, + "pointradius": 1, + "points": true, + "transform": "negative-Y" + }, + { + "$$hashKey": "object:678", + "alias": "In p99", + "lines": false, + "pointradius": 1, + "points": true + } + ], + "spaceLength": 
10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "dns_xact_in_quantiles_us{instance=~\"$instance\",quantile=\"0.95\"}", + "interval": "", + "legendFormat": "In p95", + "queryType": "randomWalk", + "refId": "A" + }, + { + "exemplar": true, + "expr": "dns_xact_out_quantiles_us{instance=~\"$instance\",quantile=\"0.95\"}", + "hide": false, + "interval": "", + "legendFormat": "Out p95", + "queryType": "randomWalk", + "refId": "B" + }, + { + "exemplar": true, + "expr": "dns_xact_out_quantiles_us{instance=~\"$instance\",quantile=\"0.99\"}", + "hide": false, + "interval": "", + "legendFormat": "Out p99", + "queryType": "randomWalk", + "refId": "C" + }, + { + "exemplar": true, + "expr": "dns_xact_in_quantiles_us{instance=~\"$instance\",quantile=\"0.99\"}", + "hide": false, + "interval": "", + "legendFormat": "In p99", + "queryType": "randomWalk", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNS Transaction", + "tooltip": { + "shared": false, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "µs", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 65 + }, + "hiddenSeries": false, + "id": 32, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + 
"max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "dns_top_qtype{instance=~\"$instance\"}/60", + "interval": "", + "legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top DNS QTypes", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 72 + }, + "hiddenSeries": false, + "id": 33, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": 180, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": 
false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "dns_top_udp_ports{instance=~\"$instance\"}/60", + "interval": "", + "legendFormat": "{{name}}", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top DNS Ports", + "tooltip": { + "shared": false, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:281", + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:282", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "1m", + "schemaVersion": 27, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "${DS_GRAFANACLOUD-NS1RD81-PROM}", + "definition": "label_values(instance)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "label_values(instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "datasource": "grafanacloud-ns1rd81-prom", + "description": null, + "error": null, + "filters": [], + "hide": 0, + "label": null, + "name": "Filters", + "skipUrlSync": false, + "type": "adhoc" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "pktvisor - Prometheus", + "uid": "MFI2PQlGk", + 
"version": 45 +} \ No newline at end of file diff --git a/cmake/conan.cmake b/cmake/conan.cmake new file mode 100644 index 000000000..f9879bf7f --- /dev/null +++ b/cmake/conan.cmake @@ -0,0 +1,902 @@ +# The MIT License (MIT) + +# Copyright (c) 2018 JFrog + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + + +# This file comes from: https://github.com/conan-io/cmake-conan. Please refer +# to this repository for issues and documentation. + +# Its purpose is to wrap and launch Conan C/C++ Package Manager when cmake is called. +# It will take CMake current settings (os, compiler, compiler version, architecture) +# and translate them to conan settings for installing and retrieving dependencies. + +# It is intended to facilitate developers building projects that have conan dependencies, +# but it is only necessary on the end-user side. It is not necessary to create conan +# packages, in fact it shouldn't be use for that. Check the project documentation. 
+ +# version: 0.17.0-dev + +include(CMakeParseArguments) + +function(_get_msvc_ide_version result) + set(${result} "" PARENT_SCOPE) + if(NOT MSVC_VERSION VERSION_LESS 1400 AND MSVC_VERSION VERSION_LESS 1500) + set(${result} 8 PARENT_SCOPE) + elseif(NOT MSVC_VERSION VERSION_LESS 1500 AND MSVC_VERSION VERSION_LESS 1600) + set(${result} 9 PARENT_SCOPE) + elseif(NOT MSVC_VERSION VERSION_LESS 1600 AND MSVC_VERSION VERSION_LESS 1700) + set(${result} 10 PARENT_SCOPE) + elseif(NOT MSVC_VERSION VERSION_LESS 1700 AND MSVC_VERSION VERSION_LESS 1800) + set(${result} 11 PARENT_SCOPE) + elseif(NOT MSVC_VERSION VERSION_LESS 1800 AND MSVC_VERSION VERSION_LESS 1900) + set(${result} 12 PARENT_SCOPE) + elseif(NOT MSVC_VERSION VERSION_LESS 1900 AND MSVC_VERSION VERSION_LESS 1910) + set(${result} 14 PARENT_SCOPE) + elseif(NOT MSVC_VERSION VERSION_LESS 1910 AND MSVC_VERSION VERSION_LESS 1920) + set(${result} 15 PARENT_SCOPE) + elseif(NOT MSVC_VERSION VERSION_LESS 1920 AND MSVC_VERSION VERSION_LESS 1930) + set(${result} 16 PARENT_SCOPE) + else() + message(FATAL_ERROR "Conan: Unknown MSVC compiler version [${MSVC_VERSION}]") + endif() +endfunction() + +macro(_conan_detect_build_type) + conan_parse_arguments(${ARGV}) + + if(ARGUMENTS_BUILD_TYPE) + set(_CONAN_SETTING_BUILD_TYPE ${ARGUMENTS_BUILD_TYPE}) + elseif(CMAKE_BUILD_TYPE) + set(_CONAN_SETTING_BUILD_TYPE ${CMAKE_BUILD_TYPE}) + else() + message(FATAL_ERROR "Please specify in command line CMAKE_BUILD_TYPE (-DCMAKE_BUILD_TYPE=Release)") + endif() + + string(TOUPPER ${_CONAN_SETTING_BUILD_TYPE} _CONAN_SETTING_BUILD_TYPE_UPPER) + if (_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "DEBUG") + set(_CONAN_SETTING_BUILD_TYPE "Debug") + elseif(_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "RELEASE") + set(_CONAN_SETTING_BUILD_TYPE "Release") + elseif(_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "RELWITHDEBINFO") + set(_CONAN_SETTING_BUILD_TYPE "RelWithDebInfo") + elseif(_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "MINSIZEREL") + set(_CONAN_SETTING_BUILD_TYPE 
"MinSizeRel") + endif() +endmacro() + +macro(_conan_check_system_name) + #handle -s os setting + if(CMAKE_SYSTEM_NAME AND NOT CMAKE_SYSTEM_NAME STREQUAL "Generic") + #use default conan os setting if CMAKE_SYSTEM_NAME is not defined + set(CONAN_SYSTEM_NAME ${CMAKE_SYSTEM_NAME}) + if(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin") + set(CONAN_SYSTEM_NAME Macos) + endif() + if(${CMAKE_SYSTEM_NAME} STREQUAL "QNX") + set(CONAN_SYSTEM_NAME Neutrino) + endif() + set(CONAN_SUPPORTED_PLATFORMS Windows Linux Macos Android iOS FreeBSD WindowsStore WindowsCE watchOS tvOS FreeBSD SunOS AIX Arduino Emscripten Neutrino) + list (FIND CONAN_SUPPORTED_PLATFORMS "${CONAN_SYSTEM_NAME}" _index) + if (${_index} GREATER -1) + #check if the cmake system is a conan supported one + set(_CONAN_SETTING_OS ${CONAN_SYSTEM_NAME}) + else() + message(FATAL_ERROR "cmake system ${CONAN_SYSTEM_NAME} is not supported by conan. Use one of ${CONAN_SUPPORTED_PLATFORMS}") + endif() + endif() +endmacro() + +macro(_conan_check_language) + get_property(_languages GLOBAL PROPERTY ENABLED_LANGUAGES) + if (";${_languages};" MATCHES ";CXX;") + set(LANGUAGE CXX) + set(USING_CXX 1) + elseif (";${_languages};" MATCHES ";C;") + set(LANGUAGE C) + set(USING_CXX 0) + else () + message(FATAL_ERROR "Conan: Neither C or C++ was detected as a language for the project. Unabled to detect compiler version.") + endif() +endmacro() + +macro(_conan_detect_compiler) + + conan_parse_arguments(${ARGV}) + + if(ARGUMENTS_ARCH) + set(_CONAN_SETTING_ARCH ${ARGUMENTS_ARCH}) + endif() + + if (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL GNU) + # using GCC + # TODO: Handle other params + string(REPLACE "." 
";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION}) + list(GET VERSION_LIST 0 MAJOR) + list(GET VERSION_LIST 1 MINOR) + set(COMPILER_VERSION ${MAJOR}.${MINOR}) + if(${MAJOR} GREATER 4) + set(COMPILER_VERSION ${MAJOR}) + endif() + set(_CONAN_SETTING_COMPILER gcc) + set(_CONAN_SETTING_COMPILER_VERSION ${COMPILER_VERSION}) + if (USING_CXX) + conan_cmake_detect_unix_libcxx(_LIBCXX) + set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX}) + endif () + elseif (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL Intel) + string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION}) + list(GET VERSION_LIST 0 MAJOR) + list(GET VERSION_LIST 1 MINOR) + set(COMPILER_VERSION ${MAJOR}.${MINOR}) + set(_CONAN_SETTING_COMPILER intel) + set(_CONAN_SETTING_COMPILER_VERSION ${COMPILER_VERSION}) + if (USING_CXX) + conan_cmake_detect_unix_libcxx(_LIBCXX) + set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX}) + endif () + elseif (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL AppleClang) + # using AppleClang + string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION}) + list(GET VERSION_LIST 0 MAJOR) + list(GET VERSION_LIST 1 MINOR) + set(_CONAN_SETTING_COMPILER apple-clang) + set(_CONAN_SETTING_COMPILER_VERSION ${MAJOR}.${MINOR}) + if (USING_CXX) + conan_cmake_detect_unix_libcxx(_LIBCXX) + set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX}) + endif () + elseif (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL Clang) + string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION}) + list(GET VERSION_LIST 0 MAJOR) + list(GET VERSION_LIST 1 MINOR) + set(_CONAN_SETTING_COMPILER clang) + set(_CONAN_SETTING_COMPILER_VERSION ${MAJOR}.${MINOR}) + if(APPLE) + cmake_policy(GET CMP0025 APPLE_CLANG_POLICY) + if(NOT APPLE_CLANG_POLICY STREQUAL NEW) + message(STATUS "Conan: APPLE and Clang detected. Assuming apple-clang compiler. 
Set CMP0025 to avoid it") + set(_CONAN_SETTING_COMPILER apple-clang) + endif() + endif() + if(${_CONAN_SETTING_COMPILER} STREQUAL clang AND ${MAJOR} GREATER 7) + set(_CONAN_SETTING_COMPILER_VERSION ${MAJOR}) + endif() + if (USING_CXX) + conan_cmake_detect_unix_libcxx(_LIBCXX) + set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX}) + endif () + elseif(${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL MSVC) + set(_VISUAL "Visual Studio") + _get_msvc_ide_version(_VISUAL_VERSION) + if("${_VISUAL_VERSION}" STREQUAL "") + message(FATAL_ERROR "Conan: Visual Studio not recognized") + else() + set(_CONAN_SETTING_COMPILER ${_VISUAL}) + set(_CONAN_SETTING_COMPILER_VERSION ${_VISUAL_VERSION}) + endif() + + if(NOT _CONAN_SETTING_ARCH) + if (MSVC_${LANGUAGE}_ARCHITECTURE_ID MATCHES "64") + set(_CONAN_SETTING_ARCH x86_64) + elseif (MSVC_${LANGUAGE}_ARCHITECTURE_ID MATCHES "^ARM") + message(STATUS "Conan: Using default ARM architecture from MSVC") + set(_CONAN_SETTING_ARCH armv6) + elseif (MSVC_${LANGUAGE}_ARCHITECTURE_ID MATCHES "86") + set(_CONAN_SETTING_ARCH x86) + else () + message(FATAL_ERROR "Conan: Unknown MSVC architecture [${MSVC_${LANGUAGE}_ARCHITECTURE_ID}]") + endif() + endif() + + conan_cmake_detect_vs_runtime(_vs_runtime ${ARGV}) + message(STATUS "Conan: Detected VS runtime: ${_vs_runtime}") + set(_CONAN_SETTING_COMPILER_RUNTIME ${_vs_runtime}) + + if (CMAKE_GENERATOR_TOOLSET) + set(_CONAN_SETTING_COMPILER_TOOLSET ${CMAKE_VS_PLATFORM_TOOLSET}) + elseif(CMAKE_VS_PLATFORM_TOOLSET AND (CMAKE_GENERATOR STREQUAL "Ninja")) + set(_CONAN_SETTING_COMPILER_TOOLSET ${CMAKE_VS_PLATFORM_TOOLSET}) + endif() + else() + message(FATAL_ERROR "Conan: compiler setup not recognized") + endif() + +endmacro() + +function(conan_cmake_settings result) + #message(STATUS "COMPILER " ${CMAKE_CXX_COMPILER}) + #message(STATUS "COMPILER " ${CMAKE_CXX_COMPILER_ID}) + #message(STATUS "VERSION " ${CMAKE_CXX_COMPILER_VERSION}) + #message(STATUS "FLAGS " ${CMAKE_LANG_FLAGS}) + #message(STATUS "LIB ARCH " 
${CMAKE_CXX_LIBRARY_ARCHITECTURE}) + #message(STATUS "BUILD TYPE " ${CMAKE_BUILD_TYPE}) + #message(STATUS "GENERATOR " ${CMAKE_GENERATOR}) + #message(STATUS "GENERATOR WIN64 " ${CMAKE_CL_64}) + + message(STATUS "Conan: Automatic detection of conan settings from cmake") + + conan_parse_arguments(${ARGV}) + + _conan_detect_build_type(${ARGV}) + + _conan_check_system_name() + + _conan_check_language() + + _conan_detect_compiler(${ARGV}) + + # If profile is defined it is used + if(CMAKE_BUILD_TYPE STREQUAL "Debug" AND ARGUMENTS_DEBUG_PROFILE) + set(_APPLIED_PROFILES ${ARGUMENTS_DEBUG_PROFILE}) + elseif(CMAKE_BUILD_TYPE STREQUAL "Release" AND ARGUMENTS_RELEASE_PROFILE) + set(_APPLIED_PROFILES ${ARGUMENTS_RELEASE_PROFILE}) + elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo" AND ARGUMENTS_RELWITHDEBINFO_PROFILE) + set(_APPLIED_PROFILES ${ARGUMENTS_RELWITHDEBINFO_PROFILE}) + elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel" AND ARGUMENTS_MINSIZEREL_PROFILE) + set(_APPLIED_PROFILES ${ARGUMENTS_MINSIZEREL_PROFILE}) + elseif(ARGUMENTS_PROFILE) + set(_APPLIED_PROFILES ${ARGUMENTS_PROFILE}) + endif() + + foreach(ARG ${_APPLIED_PROFILES}) + set(_SETTINGS ${_SETTINGS} -pr=${ARG}) + endforeach() + foreach(ARG ${ARGUMENTS_PROFILE_BUILD}) + conan_check(VERSION 1.24.0 REQUIRED DETECT_QUIET) + set(_SETTINGS ${_SETTINGS} -pr:b=${ARG}) + endforeach() + + if(NOT _SETTINGS OR ARGUMENTS_PROFILE_AUTO STREQUAL "ALL") + set(ARGUMENTS_PROFILE_AUTO arch build_type compiler compiler.version + compiler.runtime compiler.libcxx compiler.toolset) + endif() + + # remove any manually specified settings from the autodetected settings + foreach(ARG ${ARGUMENTS_SETTINGS}) + string(REGEX MATCH "[^=]*" MANUAL_SETTING "${ARG}") + message(STATUS "Conan: ${MANUAL_SETTING} was added as an argument. 
Not using the autodetected one.") + list(REMOVE_ITEM ARGUMENTS_PROFILE_AUTO "${MANUAL_SETTING}") + endforeach() + + # Automatic from CMake + foreach(ARG ${ARGUMENTS_PROFILE_AUTO}) + string(TOUPPER ${ARG} _arg_name) + string(REPLACE "." "_" _arg_name ${_arg_name}) + if(_CONAN_SETTING_${_arg_name}) + set(_SETTINGS ${_SETTINGS} -s ${ARG}=${_CONAN_SETTING_${_arg_name}}) + endif() + endforeach() + + foreach(ARG ${ARGUMENTS_SETTINGS}) + set(_SETTINGS ${_SETTINGS} -s ${ARG}) + endforeach() + + message(STATUS "Conan: Settings= ${_SETTINGS}") + + set(${result} ${_SETTINGS} PARENT_SCOPE) +endfunction() + + +function(conan_cmake_detect_unix_libcxx result) + # Take into account any -stdlib in compile options + get_directory_property(compile_options DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMPILE_OPTIONS) + string(GENEX_STRIP "${compile_options}" compile_options) + + # Take into account any _GLIBCXX_USE_CXX11_ABI in compile definitions + get_directory_property(defines DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMPILE_DEFINITIONS) + string(GENEX_STRIP "${defines}" defines) + + foreach(define ${defines}) + if(define MATCHES "_GLIBCXX_USE_CXX11_ABI") + if(define MATCHES "^-D") + set(compile_options ${compile_options} "${define}") + else() + set(compile_options ${compile_options} "-D${define}") + endif() + endif() + endforeach() + + # add additional compiler options ala cmRulePlaceholderExpander::ExpandRuleVariable + set(EXPAND_CXX_COMPILER ${CMAKE_CXX_COMPILER}) + if(CMAKE_CXX_COMPILER_ARG1) + # CMake splits CXX="foo bar baz" into CMAKE_CXX_COMPILER="foo", CMAKE_CXX_COMPILER_ARG1="bar baz" + # without this, ccache, winegcc, or other wrappers might lose all their arguments + separate_arguments(SPLIT_CXX_COMPILER_ARG1 NATIVE_COMMAND ${CMAKE_CXX_COMPILER_ARG1}) + list(APPEND EXPAND_CXX_COMPILER ${SPLIT_CXX_COMPILER_ARG1}) + endif() + + if(CMAKE_CXX_COMPILE_OPTIONS_TARGET AND CMAKE_CXX_COMPILER_TARGET) + # without --target= we may be calling the wrong underlying GCC + list(APPEND 
EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_TARGET}${CMAKE_CXX_COMPILER_TARGET}") + endif() + + if(CMAKE_CXX_COMPILE_OPTIONS_EXTERNAL_TOOLCHAIN AND CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN) + list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_EXTERNAL_TOOLCHAIN}${CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN}") + endif() + + if(CMAKE_CXX_COMPILE_OPTIONS_SYSROOT) + # without --sysroot= we may find the wrong #include + if(CMAKE_SYSROOT_COMPILE) + list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_SYSROOT}${CMAKE_SYSROOT_COMPILE}") + elseif(CMAKE_SYSROOT) + list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_SYSROOT}${CMAKE_SYSROOT}") + endif() + endif() + + separate_arguments(SPLIT_CXX_FLAGS NATIVE_COMMAND ${CMAKE_CXX_FLAGS}) + + if(CMAKE_OSX_SYSROOT) + set(xcode_sysroot_option "--sysroot=${CMAKE_OSX_SYSROOT}") + endif() + + execute_process( + COMMAND ${CMAKE_COMMAND} -E echo "#include " + COMMAND ${EXPAND_CXX_COMPILER} ${SPLIT_CXX_FLAGS} -x c++ ${xcode_sysroot_option} ${compile_options} -E -dM - + OUTPUT_VARIABLE string_defines + ) + + if(string_defines MATCHES "#define __GLIBCXX__") + # Allow -D_GLIBCXX_USE_CXX11_ABI=ON/OFF as argument to cmake + if(DEFINED _GLIBCXX_USE_CXX11_ABI) + if(_GLIBCXX_USE_CXX11_ABI) + set(${result} libstdc++11 PARENT_SCOPE) + return() + else() + set(${result} libstdc++ PARENT_SCOPE) + return() + endif() + endif() + + if(string_defines MATCHES "#define _GLIBCXX_USE_CXX11_ABI 1\n") + set(${result} libstdc++11 PARENT_SCOPE) + else() + # Either the compiler is missing the define because it is old, and so + # it can't use the new abi, or the compiler was configured to use the + # old abi by the user or distro (e.g. 
devtoolset on RHEL/CentOS) + set(${result} libstdc++ PARENT_SCOPE) + endif() + else() + set(${result} libc++ PARENT_SCOPE) + endif() +endfunction() + +function(conan_cmake_detect_vs_runtime result) + + conan_parse_arguments(${ARGV}) + if(ARGUMENTS_BUILD_TYPE) + set(build_type "${ARGUMENTS_BUILD_TYPE}") + elseif(CMAKE_BUILD_TYPE) + set(build_type "${CMAKE_BUILD_TYPE}") + else() + message(FATAL_ERROR "Please specify in command line CMAKE_BUILD_TYPE (-DCMAKE_BUILD_TYPE=Release)") + endif() + + if(build_type) + string(TOUPPER "${build_type}" build_type) + endif() + set(variables CMAKE_CXX_FLAGS_${build_type} CMAKE_C_FLAGS_${build_type} CMAKE_CXX_FLAGS CMAKE_C_FLAGS) + foreach(variable ${variables}) + if(NOT "${${variable}}" STREQUAL "") + string(REPLACE " " ";" flags "${${variable}}") + foreach (flag ${flags}) + if("${flag}" STREQUAL "/MD" OR "${flag}" STREQUAL "/MDd" OR "${flag}" STREQUAL "/MT" OR "${flag}" STREQUAL "/MTd") + string(SUBSTRING "${flag}" 1 -1 runtime) + set(${result} "${runtime}" PARENT_SCOPE) + return() + endif() + endforeach() + endif() + endforeach() + if("${build_type}" STREQUAL "DEBUG") + set(${result} "MDd" PARENT_SCOPE) + else() + set(${result} "MD" PARENT_SCOPE) + endif() +endfunction() + +function(_collect_settings result) + set(ARGUMENTS_PROFILE_AUTO arch build_type compiler compiler.version + compiler.runtime compiler.libcxx compiler.toolset) + foreach(ARG ${ARGUMENTS_PROFILE_AUTO}) + string(TOUPPER ${ARG} _arg_name) + string(REPLACE "." 
"_" _arg_name ${_arg_name}) + if(_CONAN_SETTING_${_arg_name}) + set(detected_setings ${detected_setings} ${ARG}=${_CONAN_SETTING_${_arg_name}}) + endif() + endforeach() + set(${result} ${detected_setings} PARENT_SCOPE) +endfunction() + +function(conan_cmake_autodetect detected_settings) + _conan_detect_build_type() + _conan_check_system_name() + _conan_check_language() + _conan_detect_compiler() + _collect_settings(collected_settings) + set(${detected_settings} ${collected_settings} PARENT_SCOPE) +endfunction() + +macro(conan_parse_arguments) + set(options BASIC_SETUP CMAKE_TARGETS UPDATE KEEP_RPATHS NO_LOAD NO_OUTPUT_DIRS OUTPUT_QUIET NO_IMPORTS SKIP_STD) + set(oneValueArgs CONANFILE ARCH BUILD_TYPE INSTALL_FOLDER CONAN_COMMAND) + set(multiValueArgs DEBUG_PROFILE RELEASE_PROFILE RELWITHDEBINFO_PROFILE MINSIZEREL_PROFILE + PROFILE REQUIRES OPTIONS IMPORTS SETTINGS BUILD ENV GENERATORS PROFILE_AUTO + INSTALL_ARGS CONFIGURATION_TYPES PROFILE_BUILD BUILD_REQUIRES) + cmake_parse_arguments(ARGUMENTS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) +endmacro() + +function(old_conan_cmake_install) + # Calls "conan install" + # Argument BUILD is equivalant to --build={missing, PkgName,...} or + # --build when argument is 'BUILD all' (which builds all packages from source) + # Argument CONAN_COMMAND, to specify the conan path, e.g. 
in case of running from source + # cmake does not identify conan as command, even if it is +x and it is in the path + conan_parse_arguments(${ARGV}) + + if(CONAN_CMAKE_MULTI) + set(ARGUMENTS_GENERATORS ${ARGUMENTS_GENERATORS} cmake_multi) + else() + set(ARGUMENTS_GENERATORS ${ARGUMENTS_GENERATORS} cmake) + endif() + + set(CONAN_BUILD_POLICY "") + foreach(ARG ${ARGUMENTS_BUILD}) + if(${ARG} STREQUAL "all") + set(CONAN_BUILD_POLICY ${CONAN_BUILD_POLICY} --build) + break() + else() + set(CONAN_BUILD_POLICY ${CONAN_BUILD_POLICY} --build=${ARG}) + endif() + endforeach() + if(ARGUMENTS_CONAN_COMMAND) + set(CONAN_CMD ${ARGUMENTS_CONAN_COMMAND}) + else() + conan_check(REQUIRED) + endif() + set(CONAN_OPTIONS "") + if(ARGUMENTS_CONANFILE) + if(IS_ABSOLUTE ${ARGUMENTS_CONANFILE}) + set(CONANFILE ${ARGUMENTS_CONANFILE}) + else() + set(CONANFILE ${CMAKE_CURRENT_SOURCE_DIR}/${ARGUMENTS_CONANFILE}) + endif() + else() + set(CONANFILE ".") + endif() + foreach(ARG ${ARGUMENTS_OPTIONS}) + set(CONAN_OPTIONS ${CONAN_OPTIONS} -o=${ARG}) + endforeach() + if(ARGUMENTS_UPDATE) + set(CONAN_INSTALL_UPDATE --update) + endif() + if(ARGUMENTS_NO_IMPORTS) + set(CONAN_INSTALL_NO_IMPORTS --no-imports) + endif() + set(CONAN_INSTALL_FOLDER "") + if(ARGUMENTS_INSTALL_FOLDER) + set(CONAN_INSTALL_FOLDER -if=${ARGUMENTS_INSTALL_FOLDER}) + endif() + foreach(ARG ${ARGUMENTS_GENERATORS}) + set(CONAN_GENERATORS ${CONAN_GENERATORS} -g=${ARG}) + endforeach() + foreach(ARG ${ARGUMENTS_ENV}) + set(CONAN_ENV_VARS ${CONAN_ENV_VARS} -e=${ARG}) + endforeach() + set(conan_args install ${CONANFILE} ${settings} ${CONAN_ENV_VARS} ${CONAN_GENERATORS} ${CONAN_BUILD_POLICY} ${CONAN_INSTALL_UPDATE} ${CONAN_INSTALL_NO_IMPORTS} ${CONAN_OPTIONS} ${CONAN_INSTALL_FOLDER} ${ARGUMENTS_INSTALL_ARGS}) + + string (REPLACE ";" " " _conan_args "${conan_args}") + message(STATUS "Conan executing: ${CONAN_CMD} ${_conan_args}") + + if(ARGUMENTS_OUTPUT_QUIET) + execute_process(COMMAND ${CONAN_CMD} ${conan_args} + RESULT_VARIABLE 
return_code + OUTPUT_VARIABLE conan_output + ERROR_VARIABLE conan_output + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + else() + execute_process(COMMAND ${CONAN_CMD} ${conan_args} + RESULT_VARIABLE return_code + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + endif() + + if(NOT "${return_code}" STREQUAL "0") + message(FATAL_ERROR "Conan install failed='${return_code}'") + endif() + +endfunction() + +function(conan_cmake_install) + if(DEFINED CONAN_COMMAND) + set(CONAN_CMD ${CONAN_COMMAND}) + else() + conan_check(REQUIRED) + endif() + + set(installOptions UPDATE NO_IMPORTS OUTPUT_QUIET ERROR_QUIET) + set(installOneValueArgs PATH_OR_REFERENCE REFERENCE REMOTE LOCKFILE LOCKFILE_OUT LOCKFILE_NODE_ID INSTALL_FOLDER) + set(installMultiValueArgs GENERATOR BUILD ENV ENV_HOST ENV_BUILD OPTIONS_HOST OPTIONS OPTIONS_BUILD PROFILE + PROFILE_HOST PROFILE_BUILD SETTINGS SETTINGS_HOST SETTINGS_BUILD) + cmake_parse_arguments(ARGS "${installOptions}" "${installOneValueArgs}" "${installMultiValueArgs}" ${ARGN}) + foreach(arg ${installOptions}) + if(ARGS_${arg}) + set(${arg} ${${arg}} ${ARGS_${arg}}) + endif() + endforeach() + foreach(arg ${installOneValueArgs}) + if(DEFINED ARGS_${arg}) + if("${arg}" STREQUAL "REMOTE") + set(flag "--remote") + elseif("${arg}" STREQUAL "LOCKFILE") + set(flag "--lockfile") + elseif("${arg}" STREQUAL "LOCKFILE_OUT") + set(flag "--lockfile-out") + elseif("${arg}" STREQUAL "LOCKFILE_NODE_ID") + set(flag "--lockfile-node-id") + elseif("${arg}" STREQUAL "INSTALL_FOLDER") + set(flag "--install-folder") + endif() + set(${arg} ${${arg}} ${flag} ${ARGS_${arg}}) + endif() + endforeach() + foreach(arg ${installMultiValueArgs}) + if(DEFINED ARGS_${arg}) + if("${arg}" STREQUAL "GENERATOR") + set(flag "--generator") + elseif("${arg}" STREQUAL "BUILD") + set(flag "--build") + elseif("${arg}" STREQUAL "ENV") + set(flag "--env") + elseif("${arg}" STREQUAL "ENV_HOST") + set(flag "--env:host") + elseif("${arg}" STREQUAL "ENV_BUILD") + set(flag "--env:build") + 
elseif("${arg}" STREQUAL "OPTIONS") + set(flag "--options") + elseif("${arg}" STREQUAL "OPTIONS_HOST") + set(flag "--options:host") + elseif("${arg}" STREQUAL "OPTIONS_BUILD") + set(flag "--options:build") + elseif("${arg}" STREQUAL "PROFILE") + set(flag "--profile") + elseif("${arg}" STREQUAL "PROFILE_HOST") + set(flag "--profile:host") + elseif("${arg}" STREQUAL "PROFILE_BUILD") + set(flag "--profile:build") + elseif("${arg}" STREQUAL "SETTINGS") + set(flag "--settings") + elseif("${arg}" STREQUAL "SETTINGS_HOST") + set(flag "--settings:host") + elseif("${arg}" STREQUAL "SETTINGS_BUILD") + set(flag "--settings:build") + endif() + list(LENGTH ARGS_${arg} numargs) + foreach(item ${ARGS_${arg}}) + if(${item} STREQUAL "all" AND ${arg} STREQUAL "BUILD") + set(${arg} "--build") + break() + endif() + set(${arg} ${${arg}} ${flag} ${item}) + endforeach() + endif() + endforeach() + if(DEFINED UPDATE) + set(UPDATE --update) + endif() + if(DEFINED NO_IMPORTS) + set(NO_IMPORTS --no-imports) + endif() + set(install_args install ${PATH_OR_REFERENCE} ${REFERENCE} ${UPDATE} ${NO_IMPORTS} ${REMOTE} ${LOCKFILE} ${LOCKFILE_OUT} ${LOCKFILE_NODE_ID} ${INSTALL_FOLDER} + ${GENERATOR} ${BUILD} ${ENV} ${ENV_HOST} ${ENV_BUILD} ${OPTIONS} ${OPTIONS_HOST} ${OPTIONS_BUILD} + ${PROFILE} ${PROFILE_HOST} ${PROFILE_BUILD} ${SETTINGS} ${SETTINGS_HOST} ${SETTINGS_BUILD}) + + string(REPLACE ";" " " _install_args "${install_args}") + message(STATUS "Conan executing: ${CONAN_CMD} ${_install_args}") + + if(ARGS_OUTPUT_QUIET) + set(OUTPUT_OPT OUTPUT_QUIET) + endif() + if(ARGS_ERROR_QUIET) + set(ERROR_OPT ERROR_QUIET) + endif() + + execute_process(COMMAND ${CONAN_CMD} ${install_args} + RESULT_VARIABLE return_code + ${OUTPUT_OPT} + ${ERROR_OPT} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + + if(NOT "${return_code}" STREQUAL "0") + if (ARGS_ERROR_QUIET) + message(WARNING "Conan install failed='${return_code}'") + else() + message(FATAL_ERROR "Conan install failed='${return_code}'") + endif() + endif() 
+ +endfunction() + +function(conan_cmake_setup_conanfile) + conan_parse_arguments(${ARGV}) + if(ARGUMENTS_CONANFILE) + get_filename_component(_CONANFILE_NAME ${ARGUMENTS_CONANFILE} NAME) + # configure_file will make sure cmake re-runs when conanfile is updated + configure_file(${ARGUMENTS_CONANFILE} ${CMAKE_CURRENT_BINARY_DIR}/${_CONANFILE_NAME}.junk COPYONLY) + file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/${_CONANFILE_NAME}.junk) + else() + conan_cmake_generate_conanfile(ON ${ARGV}) + endif() +endfunction() + +function(conan_cmake_configure) + conan_cmake_generate_conanfile(OFF ${ARGV}) +endfunction() + +# Generate, writing in disk a conanfile.txt with the requires, options, and imports +# specified as arguments +# This will be considered as temporary file, generated in CMAKE_CURRENT_BINARY_DIR) +function(conan_cmake_generate_conanfile DEFAULT_GENERATOR) + + conan_parse_arguments(${ARGV}) + + set(_FN "${CMAKE_CURRENT_BINARY_DIR}/conanfile.txt") + file(WRITE ${_FN} "") + + if(DEFINED ARGUMENTS_REQUIRES) + file(APPEND ${_FN} "[requires]\n") + foreach(REQUIRE ${ARGUMENTS_REQUIRES}) + file(APPEND ${_FN} ${REQUIRE} "\n") + endforeach() + endif() + + if (DEFAULT_GENERATOR OR DEFINED ARGUMENTS_GENERATORS) + file(APPEND ${_FN} "[generators]\n") + if (DEFAULT_GENERATOR) + file(APPEND ${_FN} "cmake\n") + endif() + if (DEFINED ARGUMENTS_GENERATORS) + foreach(GENERATOR ${ARGUMENTS_GENERATORS}) + file(APPEND ${_FN} ${GENERATOR} "\n") + endforeach() + endif() + endif() + + if(DEFINED ARGUMENTS_BUILD_REQUIRES) + file(APPEND ${_FN} "[build_requires]\n") + foreach(BUILD_REQUIRE ${ARGUMENTS_BUILD_REQUIRES}) + file(APPEND ${_FN} ${BUILD_REQUIRE} "\n") + endforeach() + endif() + + if(DEFINED ARGUMENTS_IMPORTS) + file(APPEND ${_FN} "[imports]\n") + foreach(IMPORTS ${ARGUMENTS_IMPORTS}) + file(APPEND ${_FN} ${IMPORTS} "\n") + endforeach() + endif() + + if(DEFINED ARGUMENTS_OPTIONS) + file(APPEND ${_FN} "[options]\n") + foreach(OPTION ${ARGUMENTS_OPTIONS}) + file(APPEND ${_FN} ${OPTION} 
"\n") + endforeach() + endif() + +endfunction() + + +macro(conan_load_buildinfo) + if(CONAN_CMAKE_MULTI) + set(_CONANBUILDINFO conanbuildinfo_multi.cmake) + else() + set(_CONANBUILDINFO conanbuildinfo.cmake) + endif() + if(ARGUMENTS_INSTALL_FOLDER) + set(_CONANBUILDINFOFOLDER ${ARGUMENTS_INSTALL_FOLDER}) + else() + set(_CONANBUILDINFOFOLDER ${CMAKE_CURRENT_BINARY_DIR}) + endif() + # Checks for the existence of conanbuildinfo.cmake, and loads it + # important that it is macro, so variables defined at parent scope + if(EXISTS "${_CONANBUILDINFOFOLDER}/${_CONANBUILDINFO}") + message(STATUS "Conan: Loading ${_CONANBUILDINFO}") + include(${_CONANBUILDINFOFOLDER}/${_CONANBUILDINFO}) + else() + message(FATAL_ERROR "${_CONANBUILDINFO} doesn't exist in ${CMAKE_CURRENT_BINARY_DIR}") + endif() +endmacro() + + +macro(conan_cmake_run) + conan_parse_arguments(${ARGV}) + + if(ARGUMENTS_CONFIGURATION_TYPES AND NOT CMAKE_CONFIGURATION_TYPES) + message(WARNING "CONFIGURATION_TYPES should only be specified for multi-configuration generators") + elseif(ARGUMENTS_CONFIGURATION_TYPES AND ARGUMENTS_BUILD_TYPE) + message(WARNING "CONFIGURATION_TYPES and BUILD_TYPE arguments should not be defined at the same time.") + endif() + + if(CMAKE_CONFIGURATION_TYPES AND NOT CMAKE_BUILD_TYPE AND NOT CONAN_EXPORTED + AND NOT ARGUMENTS_BUILD_TYPE) + set(CONAN_CMAKE_MULTI ON) + if (NOT ARGUMENTS_CONFIGURATION_TYPES) + set(ARGUMENTS_CONFIGURATION_TYPES "Release;Debug") + endif() + message(STATUS "Conan: Using cmake-multi generator") + else() + set(CONAN_CMAKE_MULTI OFF) + endif() + + if(NOT CONAN_EXPORTED) + conan_cmake_setup_conanfile(${ARGV}) + if(CONAN_CMAKE_MULTI) + foreach(CMAKE_BUILD_TYPE ${ARGUMENTS_CONFIGURATION_TYPES}) + set(ENV{CONAN_IMPORT_PATH} ${CMAKE_BUILD_TYPE}) + conan_cmake_settings(settings ${ARGV}) + old_conan_cmake_install(SETTINGS ${settings} ${ARGV}) + endforeach() + set(CMAKE_BUILD_TYPE) + else() + conan_cmake_settings(settings ${ARGV}) + old_conan_cmake_install(SETTINGS 
${settings} ${ARGV}) + endif() + endif() + + if (NOT ARGUMENTS_NO_LOAD) + conan_load_buildinfo() + endif() + + if(ARGUMENTS_BASIC_SETUP) + foreach(_option CMAKE_TARGETS KEEP_RPATHS NO_OUTPUT_DIRS SKIP_STD) + if(ARGUMENTS_${_option}) + if(${_option} STREQUAL "CMAKE_TARGETS") + list(APPEND _setup_options "TARGETS") + else() + list(APPEND _setup_options ${_option}) + endif() + endif() + endforeach() + conan_basic_setup(${_setup_options}) + endif() +endmacro() + +macro(conan_check) + # Checks conan availability in PATH + # Arguments REQUIRED, DETECT_QUIET and VERSION are optional + # Example usage: + # conan_check(VERSION 1.0.0 REQUIRED) + set(options REQUIRED DETECT_QUIET) + set(oneValueArgs VERSION) + cmake_parse_arguments(CONAN "${options}" "${oneValueArgs}" "" ${ARGN}) + if(NOT CONAN_DETECT_QUIET) + message(STATUS "Conan: checking conan executable") + endif() + + find_program(CONAN_CMD conan) + if(NOT CONAN_CMD AND CONAN_REQUIRED) + message(FATAL_ERROR "Conan executable not found! Please install conan.") + endif() + if(NOT CONAN_DETECT_QUIET) + message(STATUS "Conan: Found program ${CONAN_CMD}") + endif() + execute_process(COMMAND ${CONAN_CMD} --version + RESULT_VARIABLE return_code + OUTPUT_VARIABLE CONAN_VERSION_OUTPUT + ERROR_VARIABLE CONAN_VERSION_OUTPUT) + + if(NOT "${return_code}" STREQUAL "0") + message(FATAL_ERROR "Conan --version failed='${return_code}'") + endif() + + if(NOT CONAN_DETECT_QUIET) + string(STRIP "${CONAN_VERSION_OUTPUT}" _CONAN_VERSION_OUTPUT) + message(STATUS "Conan: Version found ${_CONAN_VERSION_OUTPUT}") + endif() + + if(DEFINED CONAN_VERSION) + string(REGEX MATCH ".*Conan version ([0-9]+\\.[0-9]+\\.[0-9]+)" FOO + "${CONAN_VERSION_OUTPUT}") + if(${CMAKE_MATCH_1} VERSION_LESS ${CONAN_VERSION}) + message(FATAL_ERROR "Conan outdated. Installed: ${CMAKE_MATCH_1}, \ + required: ${CONAN_VERSION}. 
Consider updating via 'pip \ + install conan==${CONAN_VERSION}'.") + endif() + endif() +endmacro() + +function(conan_add_remote) + # Adds a remote + # Arguments URL and NAME are required, INDEX, COMMAND and VERIFY_SSL are optional + # Example usage: + # conan_add_remote(NAME bincrafters INDEX 1 + # URL https://api.bintray.com/conan/bincrafters/public-conan + # VERIFY_SSL True) + set(oneValueArgs URL NAME INDEX COMMAND VERIFY_SSL) + cmake_parse_arguments(CONAN "" "${oneValueArgs}" "" ${ARGN}) + + if(DEFINED CONAN_INDEX) + set(CONAN_INDEX_ARG "-i ${CONAN_INDEX}") + endif() + if(DEFINED CONAN_COMMAND) + set(CONAN_CMD ${CONAN_COMMAND}) + else() + conan_check(REQUIRED DETECT_QUIET) + endif() + set(CONAN_VERIFY_SSL_ARG "True") + if(DEFINED CONAN_VERIFY_SSL) + set(CONAN_VERIFY_SSL_ARG ${CONAN_VERIFY_SSL}) + endif() + message(STATUS "Conan: Adding ${CONAN_NAME} remote repository (${CONAN_URL}) verify ssl (${CONAN_VERIFY_SSL_ARG})") + execute_process(COMMAND ${CONAN_CMD} remote add ${CONAN_NAME} ${CONAN_INDEX_ARG} -f ${CONAN_URL} ${CONAN_VERIFY_SSL_ARG} + RESULT_VARIABLE return_code) + if(NOT "${return_code}" STREQUAL "0") + message(FATAL_ERROR "Conan remote failed='${return_code}'") + endif() +endfunction() + +macro(conan_config_install) + # install a full configuration from a local or remote zip file + # Argument ITEM is required, arguments TYPE, SOURCE, TARGET and VERIFY_SSL are optional + # Example usage: + # conan_config_install(ITEM https://github.com/conan-io/cmake-conan.git + # TYPE git SOURCE source-folder TARGET target-folder VERIFY_SSL false) + set(oneValueArgs ITEM TYPE SOURCE TARGET VERIFY_SSL) + set(multiValueArgs ARGS) + cmake_parse_arguments(CONAN "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + find_program(CONAN_CMD conan) + if(NOT CONAN_CMD AND CONAN_REQUIRED) + message(FATAL_ERROR "Conan executable not found!") + endif() + + if(DEFINED CONAN_VERIFY_SSL) + set(CONAN_VERIFY_SSL_ARG "--verify-ssl=${CONAN_VERIFY_SSL}") + endif() + + if(DEFINED 
CONAN_TYPE) + set(CONAN_TYPE_ARG "--type=${CONAN_TYPE}") + endif() + + if(DEFINED CONAN_ARGS) + set(CONAN_ARGS_ARGS "--args=\"${CONAN_ARGS}\"") + endif() + + if(DEFINED CONAN_SOURCE) + set(CONAN_SOURCE_ARGS "--source-folder=${CONAN_SOURCE}") + endif() + + if(DEFINED CONAN_TARGET) + set(CONAN_TARGET_ARGS "--target-folder=${CONAN_TARGET}") + endif() + + set (CONAN_CONFIG_INSTALL_ARGS ${CONAN_VERIFY_SSL_ARG} + ${CONAN_TYPE_ARG} + ${CONAN_ARGS_ARGS} + ${CONAN_SOURCE_ARGS} + ${CONAN_TARGET_ARGS}) + + message(STATUS "Conan: Installing config from ${CONAN_ITEM}") + execute_process(COMMAND ${CONAN_CMD} config install ${CONAN_ITEM} ${CONAN_CONFIG_INSTALL_ARGS} + RESULT_VARIABLE return_code) + if(NOT "${return_code}" STREQUAL "0") + message(FATAL_ERROR "Conan config failed='${return_code}'") + endif() +endmacro() diff --git a/cmd/pktvisor-pcap/CMakeLists.txt b/cmd/pktvisor-pcap/CMakeLists.txt index 9602fdfe1..79fba690a 100644 --- a/cmd/pktvisor-pcap/CMakeLists.txt +++ b/cmd/pktvisor-pcap/CMakeLists.txt @@ -2,11 +2,11 @@ add_executable(pktvisor-pcap main.cpp) target_include_directories(pktvisor-pcap PRIVATE - ${CMAKE_BINARY_DIR}/src # Vizer::Core config.h + ${CMAKE_BINARY_DIR}/src # Visor::Core config.h ) target_link_libraries(pktvisor-pcap PRIVATE ${CONAN_LIBS_DOCOPT.CPP} - Vizer::Handler::Net - Vizer::Handler::Dns) + Visor::Handler::Net + Visor::Handler::Dns) diff --git a/cmd/pktvisor-pcap/main.cpp b/cmd/pktvisor-pcap/main.cpp index c2eb639cc..005cd2ae9 100644 --- a/cmd/pktvisor-pcap/main.cpp +++ b/cmd/pktvisor-pcap/main.cpp @@ -19,7 +19,7 @@ #include "handlers/static_plugins.h" #include "inputs/static_plugins.h" -#include "vizer_config.h" +#include "visor_config.h" #include "GeoDB.h" #include "handlers/dns/DnsStreamHandler.h" @@ -58,7 +58,7 @@ void signal_handler(int signal) } } -using namespace vizer; +using namespace visor; typedef Corrade::PluginManager::Manager InputPluginRegistry; typedef Corrade::PluginManager::Manager HandlerPluginRegistry; @@ -82,7 +82,7 @@ int 
main(int argc, char *argv[]) std::map args = docopt::docopt(USAGE, {argv + 1, argv + argc}, true, // show help if requested - VIZER_VERSION); // version string + VISOR_VERSION); // version string auto logger = spdlog::stderr_color_mt("pktvisor"); if (args["-v"].asBool()) { @@ -172,6 +172,7 @@ int main(int argc, char *argv[]) handler::net::NetStreamHandler *net_handler{nullptr}; { auto handler_module = std::make_unique("net", pcap_stream, periods, sample_rate); + handler_module->config_set("recorded_stream", true); handler_manager->module_add(std::move(handler_module)); auto [handler, handler_mgr_lock] = handler_manager->module_get_locked("net"); handler_mgr_lock.unlock(); @@ -180,6 +181,7 @@ int main(int argc, char *argv[]) handler::dns::DnsStreamHandler *dns_handler{nullptr}; { auto handler_module = std::make_unique("dns", pcap_stream, periods, sample_rate); + handler_module->config_set("recorded_stream", true); handler_manager->module_add(std::move(handler_module)); auto [handler, handler_mgr_lock] = handler_manager->module_get_locked("dns"); handler_mgr_lock.unlock(); diff --git a/cmd/pktvisord/CMakeLists.txt b/cmd/pktvisord/CMakeLists.txt index 86dbbae11..d0e737e4a 100644 --- a/cmd/pktvisord/CMakeLists.txt +++ b/cmd/pktvisord/CMakeLists.txt @@ -2,7 +2,7 @@ add_executable(pktvisord main.cpp) target_include_directories(pktvisord PRIVATE - ${CMAKE_BINARY_DIR}/src # Vizer::Core config.h + ${CMAKE_BINARY_DIR}/src # Visor::Core config.h ) target_link_libraries(pktvisord @@ -10,6 +10,6 @@ target_link_libraries(pktvisord timer resolv ${CONAN_LIBS_DOCOPT.CPP} - Vizer::Core - ${VIZER_STATIC_PLUGINS} + Visor::Core + ${VISOR_STATIC_PLUGINS} ) diff --git a/cmd/pktvisord/main.cpp b/cmd/pktvisord/main.cpp index bd57f491e..f8c53680c 100644 --- a/cmd/pktvisord/main.cpp +++ b/cmd/pktvisord/main.cpp @@ -8,10 +8,12 @@ #include "CoreServer.h" #include "handlers/static_plugins.h" #include "inputs/static_plugins.h" -#include "vizer_config.h" +#include "visor_config.h" #include 
#include +#include #include +#include #include #include "GeoDB.h" @@ -31,7 +33,6 @@ static const char USAGE[] = IFACE, if specified, is either a network interface or an IP address (4 or 6). If this is specified, a "pcap" input stream will be automatically created, with "net" and "dns" handler modules attached. - ** Note that this is deprecated; you should instead use --admin-api and create the pcap input stream via API. Base Options: -l HOST Run webserver on the given host or IP [default: localhost] @@ -39,16 +40,23 @@ static const char USAGE[] = --admin-api Enable admin REST API giving complete control plane functionality [default: false] When not specified, the exposed API is read-only access to summarized metrics. When specified, write access is enabled for all modules. + -d Daemonize; fork and continue running in the background [default: false] -h --help Show this screen -v Verbose log output --no-track Don't send lightweight, anonymous usage metrics. --version Show version - --geo-city FILE GeoLite2 City database to use for IP to Geo mapping (if enabled) - --geo-asn FILE GeoLite2 ASN database to use for IP to ASN mapping (if enabled) + --geo-city FILE GeoLite2 City database to use for IP to Geo mapping + --geo-asn FILE GeoLite2 ASN database to use for IP to ASN mapping + Logging Options: + --log-file FILE Log to the given output file name + --syslog Log to syslog + Prometheus Options: + --prometheus Enable native Prometheus metrics at path /metrics + --prom-instance ID Optionally set the 'instance' label to ID Handler Module Defaults: --max-deep-sample N Never deep sample more than N% of streams (an int between 0 and 100) [default: 100] --periods P Hold this many 60 second time periods of history in memory [default: 5] - pcap Input Module Options (deprecated, use admin-api instead): + pcap Input Module Options: -b BPF Filter packets using the given BPF string -H HOSTSPEC Specify subnets (comma separated) to consider HOST, in CIDR form. 
In live capture this /may/ be detected automatically from capture device but /must/ be specified for pcaps. Example: "10.0.1.0/24,10.0.2.1/32,2001:db8::/64" @@ -63,7 +71,7 @@ void signal_handler(int signal) } } -using namespace vizer; +using namespace visor; void initialize_geo(const docopt::value &city, const docopt::value &asn) { @@ -75,20 +83,110 @@ void initialize_geo(const docopt::value &city, const docopt::value &asn) } } +// adapted from LPI becomeDaemon() +int daemonize() +{ + switch (fork()) { + case -1: + return -1; + case 0: + // Child falls through... + break; + default: + // while parent terminates + _exit(EXIT_SUCCESS); + } + + // Become leader of new session + if (setsid() == -1) { + return -1; + } + + // Ensure we are not session leader + switch (auto pid = fork()) { + case -1: + return -1; + case 0: + break; + default: + std::cerr << "pktvisord running at PID " << pid << std::endl; + _exit(EXIT_SUCCESS); + } + + // Clear file mode creation mask + umask(0); + + // Change to root directory + chdir("/"); + int maxfd, fd; + maxfd = sysconf(_SC_OPEN_MAX); + // Limit is indeterminate... 
+ if (maxfd == -1) { + maxfd = 8192; // so take a guess + } + + for (fd = 0; fd < maxfd; fd++) { + close(fd); + } + + // Reopen standard fd's to /dev/null + close(STDIN_FILENO); + + fd = open("/dev/null", O_RDWR); + + if (fd != STDIN_FILENO) { + return -1; + } + if (dup2(STDIN_FILENO, STDOUT_FILENO) != STDOUT_FILENO) { + return -1; + } + if (dup2(STDIN_FILENO, STDERR_FILENO) != STDERR_FILENO) { + return -1; + } + + return 0; +} + int main(int argc, char *argv[]) { std::map args = docopt::docopt(USAGE, {argv + 1, argv + argc}, true, // show help if requested - VIZER_VERSION); // version string + VISOR_VERSION); // version string - auto logger = spdlog::stdout_color_mt("pktvisor"); + if (args["-d"].asBool()) { + if (daemonize()) { + std::cerr << "failed to daemonize" << std::endl; + exit(EXIT_FAILURE); + } + } + + std::shared_ptr logger; + if (args["--log-file"]) { + try { + logger = spdlog::basic_logger_mt("pktvisor", args["--log-file"].asString()); + } catch (const spdlog::spdlog_ex &ex) { + std::cerr << "Log init failed: " << ex.what() << std::endl; + exit(EXIT_FAILURE); + } + } else if (args["--syslog"].asBool()) { + logger = spdlog::syslog_logger_mt("pktvisor", "pktvisord", LOG_PID); + } else { + logger = spdlog::stdout_color_mt("pktvisor"); + } if (args["-v"].asBool()) { logger->set_level(spdlog::level::debug); } - CoreServer svr(!args["--admin-api"].asBool(), logger); + PrometheusConfig prom_config; + if (args["--prometheus"].asBool()) { + prom_config.path = "/metrics"; + if (args["--prom-instance"]) { + prom_config.instance = args["--prom-instance"].asString(); + } + } + CoreServer svr(!args["--admin-api"].asBool(), logger, prom_config); svr.set_http_logger([&logger](const auto &req, const auto &res) { logger->info("REQUEST: {} {} {}", req.method, req.path, res.status); if (res.status == 500) { @@ -120,7 +218,7 @@ int main(int argc, char *argv[]) std::shared_ptr timer_handle; auto usage_metrics = [&logger] { u_char buf[1024]; - std::string 
version_str{VIZER_VERSION_NUM}; + std::string version_str{VISOR_VERSION_NUM}; std::reverse(version_str.begin(), version_str.end()); std::string target = version_str + ".pktvisord.metrics.pktvisor.dev."; logger->info("sending anonymous usage metrics (once/day, use --no-track to disable): {}", target); @@ -142,7 +240,7 @@ int main(int argc, char *argv[]) initialize_geo(args["--geo-city"], args["--geo-asn"]); } catch (const std::exception &e) { logger->error("Fatal error: {}", e.what()); - exit(-1); + exit(EXIT_FAILURE); } if (args["IFACE"]) { @@ -186,21 +284,21 @@ int main(int argc, char *argv[]) } catch (const std::exception &e) { logger->error(e.what()); - exit(-1); + exit(EXIT_FAILURE); } } else if (!args["--admin-api"].asBool()) { // if they didn't specify pcap target, or config file, or admin api then there is nothing to do logger->error("Nothing to do: specify --admin-api or IFACE."); std::cerr << USAGE << std::endl; - exit(-1); + exit(EXIT_FAILURE); } try { svr.start(host.c_str(), port); } catch (const std::exception &e) { logger->error(e.what()); - exit(-1); + exit(EXIT_FAILURE); } - return 0; + exit(EXIT_SUCCESS); } diff --git a/conanfile.txt b/conanfile.txt index b44c34508..dc69654b9 100644 --- a/conanfile.txt +++ b/conanfile.txt @@ -1,14 +1,20 @@ [requires] spdlog/1.8.2 sigslot/1.2.0 -catch2/2.13.4 docopt.cpp/0.6.3 nlohmann_json/3.9.1 cpp-httplib/0.8.0 corrade/2020.06 +pcapplusplus/ns1-dev +json-schema-validator/2.1.0 + +[build_requires] +benchmark/1.5.2 +catch2/2.13.4 [options] corrade:with_pluginmanager=True +pcapplusplus:immediate_mode=True [generators] cmake diff --git a/docker/Dockerfile b/docker/Dockerfile index 3e5bc2260..31046b12d 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,44 +1,39 @@ FROM debian:bullseye-slim AS cppbuild -ENV BUILD_DEPS "g++ cmake make git libpcap-dev pkgconf libmaxminddb-dev jq python3-pip python3-setuptools" +ENV BUILD_DEPS "g++ cmake make git pkgconf jq python3-pip python3-setuptools ca-certificates" RUN \ 
apt-get update && \ - apt-get install --yes --no-install-recommends ${BUILD_DEPS} && \ + apt-get upgrade --yes --force-yes && \ + apt-get install --yes --force-yes --no-install-recommends ${BUILD_DEPS} && \ pip3 install conan -RUN \ - mkdir /local && \ - cd /tmp && \ - git clone https://github.com/ns1/PcapPlusPlus.git && \ - cd /tmp/PcapPlusPlus && \ - ./configure-linux.sh --install-dir /local && \ - make libs -j 4 && \ - make install - COPY . /pktvisor-src/ WORKDIR /tmp/build RUN \ conan profile new --detect default && \ conan profile update settings.compiler.libcxx=libstdc++11 default && \ - conan install /pktvisor-src + conan config set general.revisions_enabled=1 + RUN \ - PKG_CONFIG_PATH=/local/lib/pkgconfig cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo /pktvisor-src && \ + PKG_CONFIG_PATH=/local/lib/pkgconfig cmake -DCMAKE_BUILD_TYPE=Release /pktvisor-src && \ make all test -j 4 FROM golang:latest AS gobuild COPY golang/ /src/ WORKDIR /src/ +COPY --from=cppbuild /pktvisor-src/golang/pkg/client/version.go /src/pkg/client/version.go RUN go build -o pktvisor-cli cmd/pktvisor-cli/main.go FROM debian:bullseye-slim AS runtime -ENV RUNTIME_DEPS "curl libpcap0.8 libmaxminddb0" +ENV RUNTIME_DEPS "curl ca-certificates" RUN \ apt-get update && \ - apt-get install --yes --no-install-recommends ${RUNTIME_DEPS} && \ + apt-get upgrade --yes --force-yes && \ + apt-get install --yes --force-yes --no-install-recommends ${RUNTIME_DEPS} && \ rm -rf /var/lib/apt COPY --from=cppbuild /tmp/build/bin/pktvisord /usr/local/sbin/pktvisord diff --git a/docker/entry.sh b/docker/entry.sh index e6270244c..f145dd31d 100755 --- a/docker/entry.sh +++ b/docker/entry.sh @@ -5,8 +5,8 @@ set -e export PATH=$PATH:/usr/local/bin/:/usr/local/sbin/ if [ $# -eq 0 ]; then - echo "No arguments provided: specify either 'pktvisor-cli','pktvisor-pcap' or 'pktvisord'. 
Try:" - echo "docker run ns1labs/pktvisor pktvisor-cli --help" + echo "No arguments provided: specify either 'pktvisor-cli', 'pktvisor-pcap' or 'pktvisord'. Try:" + echo "docker run ns1labs/pktvisor pktvisor-cli -h" echo "docker run ns1labs/pktvisor pktvisor-pcap --help" echo "docker run ns1labs/pktvisor pktvisord --help" exit 1 diff --git a/docs/CONTROL_PLANE.md b/docs/CONTROL_PLANE.md new file mode 100644 index 000000000..dc2f4e2d5 --- /dev/null +++ b/docs/CONTROL_PLANE.md @@ -0,0 +1,114 @@ +# Control Plane + +**_Draft_** + +pktvisord exposes a control plane over REST API. + +## Discovery + +pktvisord exposes a method for discovering the available modules, their configurable properties, and their associated +metrics schema. + +All interfaces and schemas are versioned. + +``` +/api/v1/inputs + { + pcap: "1.0", + "dnstap": "1.0" + } +/api/v1/inputs/pcap/interface + { + version: "1.0", + "info": { + "interfaces": { + "eth0": {} + } + }, + "defaults": { + "interface": eth0 + }, + config: { + iface: { + type: "string", + description: "the ethernet interface to capture on" + } + } + filters: { + bpf: { + type: "string", + description: "tcpdump compatible bpf filter expression" + } + }, + metric_groups: { + } + } +/api/v1/inputs/dnstap/interface + { + version: "1.0", + config: { + socket: { + type: "string", + description: "the dnstap socket to listen to" + } + } + filters: { + qname_suffix: { + type: "string", + description: "match the DNS qname sufix given", + regex: "..." + } + }, + metric_groups: { + } + } +/api/v1/handlers + { dns: { version: "1.0" }, + net: { version: "1.0" } } +/api/v1/handlers/dns/interface + { + version: "1.0", + config: { + periods: { + type: "int", + description: "number of metric periods to keep" + } + } + filters: { + qname_suffix: { + type: "string", + description: "match the DNS qname sufix given", + regex: "..." 
+ } + }, + metric_groups: { + top_error_qnames: { + description: "top N qnames with error result codes", + metrics: {, + top_refused: { + "type": "top_n", + "description": "..." + }, + top_srvfail: { + "type": "top_n", + "description": "..." + }, + top_nxdomain: { + "type": "top_n", + "description": "..." + }, + } + }, + transactions: { + description: "information on query/reply pairs", + metrics: { + ... + } + } + } + } +/api/v1/handlers/net/interface + { + } +``` + diff --git a/docs/Doxyfile b/docs/Doxyfile new file mode 100644 index 000000000..a90997659 --- /dev/null +++ b/docs/Doxyfile @@ -0,0 +1,391 @@ +# Doxyfile 1.9.1 + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- +DOXYFILE_ENCODING = UTF-8 +PROJECT_NAME = pktvisor +PROJECT_NUMBER = +PROJECT_BRIEF = "pktvisor summarizes network data streams in real time, enabling on-node and centralized data visibility and analysis via API" +PROJECT_LOGO = +OUTPUT_DIRECTORY = ./internals +CREATE_SUBDIRS = NO +ALLOW_UNICODE_NAMES = NO +OUTPUT_LANGUAGE = English +OUTPUT_TEXT_DIRECTION = None +BRIEF_MEMBER_DESC = YES +REPEAT_BRIEF = YES +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the +ALWAYS_DETAILED_SEC = NO +INLINE_INHERITED_MEMB = NO +FULL_PATH_NAMES = YES +STRIP_FROM_PATH = +STRIP_FROM_INC_PATH = +SHORT_NAMES = NO +JAVADOC_AUTOBRIEF = NO +JAVADOC_BANNER = NO +QT_AUTOBRIEF = NO +MULTILINE_CPP_IS_BRIEF = NO +PYTHON_DOCSTRING = YES +INHERIT_DOCS = YES +SEPARATE_MEMBER_PAGES = NO +TAB_SIZE = 4 +ALIASES = +OPTIMIZE_OUTPUT_FOR_C = NO +OPTIMIZE_OUTPUT_JAVA = NO +OPTIMIZE_FOR_FORTRAN = NO +OPTIMIZE_OUTPUT_VHDL = NO +OPTIMIZE_OUTPUT_SLICE = NO +EXTENSION_MAPPING = +MARKDOWN_SUPPORT = YES +TOC_INCLUDE_HEADINGS = 5 +AUTOLINK_SUPPORT = YES +BUILTIN_STL_SUPPORT = NO 
+CPP_CLI_SUPPORT = NO +SIP_SUPPORT = NO +IDL_PROPERTY_SUPPORT = YES +DISTRIBUTE_GROUP_DOC = NO +GROUP_NESTED_COMPOUNDS = NO +SUBGROUPING = YES +INLINE_GROUPED_CLASSES = NO +INLINE_SIMPLE_STRUCTS = NO +TYPEDEF_HIDES_STRUCT = NO +LOOKUP_CACHE_SIZE = 0 +NUM_PROC_THREADS = 1 +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- +EXTRACT_ALL = YES +EXTRACT_PRIVATE = NO +EXTRACT_PRIV_VIRTUAL = NO +EXTRACT_PACKAGE = NO +EXTRACT_STATIC = NO +EXTRACT_LOCAL_CLASSES = YES +EXTRACT_LOCAL_METHODS = NO +EXTRACT_ANON_NSPACES = NO +RESOLVE_UNNAMED_PARAMS = YES +HIDE_UNDOC_MEMBERS = NO +HIDE_UNDOC_CLASSES = NO +HIDE_FRIEND_COMPOUNDS = NO +HIDE_IN_BODY_DOCS = NO +INTERNAL_DOCS = NO +CASE_SENSE_NAMES = NO +HIDE_SCOPE_NAMES = NO +HIDE_COMPOUND_REFERENCE= NO +SHOW_INCLUDE_FILES = YES +SHOW_GROUPED_MEMB_INC = NO +FORCE_LOCAL_INCLUDES = NO +INLINE_INFO = YES +SORT_MEMBER_DOCS = YES +SORT_BRIEF_DOCS = NO +SORT_MEMBERS_CTORS_1ST = NO +SORT_GROUP_NAMES = NO +SORT_BY_SCOPE_NAME = NO +STRICT_PROTO_MATCHING = NO +GENERATE_TODOLIST = YES +GENERATE_TESTLIST = YES +GENERATE_BUGLIST = YES +GENERATE_DEPRECATEDLIST= YES +ENABLED_SECTIONS = +MAX_INITIALIZER_LINES = 30 +SHOW_USED_FILES = YES +SHOW_FILES = YES +SHOW_NAMESPACES = YES +FILE_VERSION_FILTER = +LAYOUT_FILE = +CITE_BIB_FILES = +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- +QUIET = NO +WARNINGS = YES +WARN_IF_UNDOCUMENTED = YES +WARN_IF_DOC_ERROR = YES +WARN_NO_PARAMDOC = NO +WARN_AS_ERROR = NO +WARN_FORMAT = "$file:$line: $text" +WARN_LOGFILE = +#--------------------------------------------------------------------------- +# Configuration options related to the input files 
+#--------------------------------------------------------------------------- +INPUT = ../src +INPUT_ENCODING = UTF-8 +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f18 \ + *.f \ + *.for \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf \ + *.ice +RECURSIVE = YES +EXCLUDE = +EXCLUDE_SYMLINKS = NO +EXCLUDE_PATTERNS = +EXCLUDE_SYMBOLS = +EXAMPLE_PATH = +EXAMPLE_PATTERNS = * +EXAMPLE_RECURSIVE = NO +IMAGE_PATH = +INPUT_FILTER = +FILTER_PATTERNS = +FILTER_SOURCE_FILES = NO +FILTER_SOURCE_PATTERNS = +USE_MDFILE_AS_MAINPAGE = +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- +SOURCE_BROWSER = YES +INLINE_SOURCES = NO +STRIP_CODE_COMMENTS = YES +REFERENCED_BY_RELATION = NO +REFERENCES_RELATION = NO +REFERENCES_LINK_SOURCE = YES +SOURCE_TOOLTIPS = YES +USE_HTAGS = NO +VERBATIM_HEADERS = YES +CLANG_ASSISTED_PARSING = NO +CLANG_ADD_INC_PATHS = YES +CLANG_OPTIONS = +CLANG_DATABASE_PATH = +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- +ALPHABETICAL_INDEX = YES +IGNORE_PREFIX = +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- +GENERATE_HTML = YES +HTML_OUTPUT = html +HTML_FILE_EXTENSION = .html +HTML_HEADER = +HTML_FOOTER = +HTML_STYLESHEET = +HTML_EXTRA_STYLESHEET = 
+HTML_EXTRA_FILES = +HTML_COLORSTYLE_HUE = 220 +HTML_COLORSTYLE_SAT = 100 +HTML_COLORSTYLE_GAMMA = 80 +HTML_TIMESTAMP = NO +HTML_DYNAMIC_MENUS = YES +HTML_DYNAMIC_SECTIONS = NO +HTML_INDEX_NUM_ENTRIES = 100 +GENERATE_DOCSET = NO +DOCSET_FEEDNAME = "Doxygen generated docs" +DOCSET_BUNDLE_ID = org.doxygen.Project +DOCSET_PUBLISHER_ID = org.doxygen.Publisher +DOCSET_PUBLISHER_NAME = Publisher +GENERATE_HTMLHELP = NO +CHM_FILE = +HHC_LOCATION = +GENERATE_CHI = NO +CHM_INDEX_ENCODING = +BINARY_TOC = NO +TOC_EXPAND = NO +GENERATE_QHP = NO +QCH_FILE = +QHP_NAMESPACE = org.doxygen.Project +QHP_VIRTUAL_FOLDER = doc +QHP_CUST_FILTER_NAME = +QHP_CUST_FILTER_ATTRS = +QHP_SECT_FILTER_ATTRS = +QHG_LOCATION = +GENERATE_ECLIPSEHELP = NO +ECLIPSE_DOC_ID = org.doxygen.Project +DISABLE_INDEX = NO +GENERATE_TREEVIEW = YES +ENUM_VALUES_PER_LINE = 4 +TREEVIEW_WIDTH = 250 +EXT_LINKS_IN_WINDOW = NO +HTML_FORMULA_FORMAT = png +FORMULA_FONTSIZE = 10 +FORMULA_TRANSPARENT = YES +FORMULA_MACROFILE = +USE_MATHJAX = NO +MATHJAX_FORMAT = HTML-CSS +MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2 +MATHJAX_EXTENSIONS = +MATHJAX_CODEFILE = +SEARCHENGINE = YES +SERVER_BASED_SEARCH = NO +EXTERNAL_SEARCH = NO +SEARCHENGINE_URL = +SEARCHDATA_FILE = searchdata.xml +EXTERNAL_SEARCH_ID = +EXTRA_SEARCH_MAPPINGS = +#--------------------------------------------------------------------------- +# Configuration options related to the LaTeX output +#--------------------------------------------------------------------------- +GENERATE_LATEX = NO +LATEX_OUTPUT = latex +LATEX_CMD_NAME = +MAKEINDEX_CMD_NAME = makeindex +LATEX_MAKEINDEX_CMD = makeindex +COMPACT_LATEX = NO +PAPER_TYPE = a4 +EXTRA_PACKAGES = +LATEX_HEADER = +LATEX_FOOTER = +LATEX_EXTRA_STYLESHEET = +LATEX_EXTRA_FILES = +PDF_HYPERLINKS = YES +USE_PDFLATEX = YES +LATEX_BATCHMODE = NO +LATEX_HIDE_INDICES = NO +LATEX_SOURCE_CODE = NO +LATEX_BIB_STYLE = plain +LATEX_TIMESTAMP = NO +LATEX_EMOJI_DIRECTORY = 
+#--------------------------------------------------------------------------- +# Configuration options related to the RTF output +#--------------------------------------------------------------------------- +GENERATE_RTF = NO +RTF_OUTPUT = rtf +COMPACT_RTF = NO +RTF_HYPERLINKS = NO +RTF_STYLESHEET_FILE = +RTF_EXTENSIONS_FILE = +RTF_SOURCE_CODE = NO +#--------------------------------------------------------------------------- +# Configuration options related to the man page output +#--------------------------------------------------------------------------- +GENERATE_MAN = NO +MAN_OUTPUT = man +MAN_EXTENSION = .3 +MAN_SUBDIR = +MAN_LINKS = NO +#--------------------------------------------------------------------------- +# Configuration options related to the XML output +#--------------------------------------------------------------------------- +GENERATE_XML = NO +XML_OUTPUT = xml +XML_PROGRAMLISTING = YES +XML_NS_MEMB_FILE_SCOPE = NO +#--------------------------------------------------------------------------- +# Configuration options related to the DOCBOOK output +#--------------------------------------------------------------------------- +GENERATE_DOCBOOK = NO +DOCBOOK_OUTPUT = docbook +DOCBOOK_PROGRAMLISTING = NO +#--------------------------------------------------------------------------- +# Configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- +GENERATE_AUTOGEN_DEF = NO +#--------------------------------------------------------------------------- +# Configuration options related to Sqlite3 output +#--------------------------------------------------------------------------- +#--------------------------------------------------------------------------- +# Configuration options related to the Perl module output +#--------------------------------------------------------------------------- +GENERATE_PERLMOD = NO +PERLMOD_LATEX = NO +PERLMOD_PRETTY = YES +PERLMOD_MAKEVAR_PREFIX = 
+#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- +ENABLE_PREPROCESSING = YES +MACRO_EXPANSION = NO +EXPAND_ONLY_PREDEF = NO +SEARCH_INCLUDES = YES +INCLUDE_PATH = +INCLUDE_FILE_PATTERNS = +PREDEFINED = +EXPAND_AS_DEFINED = +SKIP_FUNCTION_MACROS = YES +#--------------------------------------------------------------------------- +# Configuration options related to external references +#--------------------------------------------------------------------------- +TAGFILES = +GENERATE_TAGFILE = +ALLEXTERNALS = NO +EXTERNAL_GROUPS = YES +EXTERNAL_PAGES = YES +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- +CLASS_DIAGRAMS = YES +DIA_PATH = +HIDE_UNDOC_RELATIONS = YES +HAVE_DOT = NO +DOT_NUM_THREADS = 0 +DOT_FONTNAME = Helvetica +DOT_FONTSIZE = 10 +DOT_FONTPATH = +CLASS_GRAPH = YES +COLLABORATION_GRAPH = YES +GROUP_GRAPHS = YES +UML_LOOK = NO +UML_LIMIT_NUM_FIELDS = 10 +DOT_UML_DETAILS = NO +DOT_WRAP_THRESHOLD = 17 +TEMPLATE_RELATIONS = NO +INCLUDE_GRAPH = YES +INCLUDED_BY_GRAPH = YES +CALL_GRAPH = NO +CALLER_GRAPH = NO +GRAPHICAL_HIERARCHY = YES +DIRECTORY_GRAPH = YES +DOT_IMAGE_FORMAT = png +INTERACTIVE_SVG = NO +DOT_PATH = +DOTFILE_DIRS = +MSCFILE_DIRS = +DIAFILE_DIRS = +PLANTUML_JAR_PATH = +PLANTUML_CFG_FILE = +PLANTUML_INCLUDE_PATH = +DOT_GRAPH_MAX_NODES = 50 +MAX_DOT_GRAPH_DEPTH = 0 +DOT_TRANSPARENT = NO +DOT_MULTI_TARGETS = NO +GENERATE_LEGEND = YES +DOT_CLEANUP = YES diff --git a/docs/POLICIES.md b/docs/POLICIES.md new file mode 100644 index 000000000..328d31703 --- /dev/null +++ b/docs/POLICIES.md @@ -0,0 +1,261 @@ +# Orb and pktvisor Policy Driven Configuration + +**_Draft_** + +Orb and pktvisor observability configuration is policy driven. 
+ +pktvisor maybe run stand alone, or with the Orb control plane. In the latter configuration, orb-agent controls the +pktvisord process and allows centralized configuration via orb-api. + +## Base Concepts + +### pktvisor Taps + +Taps are named, host specific connections to raw stream data accessed by pktvisord. They represent configuration data +only; they do not cause any processing to take place in pktvisord. They should be referenced by Collection Policies by +name (see below). + +Taps may be configured on the command line at agent start up (often using a configuration management system) either +directly in pktvisord (via command line or admin API) when running stand alone, or indirectly via orb-agent. See Command +Line Examples below. + +`taps.yaml` + +```yaml +version: "1.0" + +visor: + # each tap has input module specific configuration options + taps: + # a pcap tap which uses eth0 and is referenced by the identifier "anycast" + anycast: + type: pcap + config: + iface: eth0 + # an sflow tap which listens on the given IP and port, referenced by the identifier "pop_switch" + pop_switch: + type: sflow + config: + port: 6343 + bind: 192.168.1.1 + # a dnstap tap which gets its stream from the given socket, named "trex_tap" + trex_tap: + type: dnstap + config: + socket: /var/dns.sock +``` + +### pktvisor Collection Policies + +Collection policies direct pktvisor to use taps to create an instance of an input stream (possibly with a filter), and +attach handlers to it. Processing takes place, and the data is exposed for sinks to collect. These policies may be given +directly to pktvisor (via command line or admin API) in standalone mode, or via Orb control plane (in which case they +are not stored in a file, but rather in the control plane database). 
+ +`collection-policy-anycast.yaml` + +```yaml +version: "1.0" + +visor: + collection: + # policy name and description + anycast_dns: + description: "base anycast DNS policy" + # input stream to create based on the given tap and optional filter config + input: + # this must reference a tap name, or application of the policy will fail + tap: anycast + # this must match the type of the matching tap name. or application of the policy will fail + type: pcap + filter: + bpf: "port 53" + # stream handlers to attach to this input stream + # these decide exactly which data to summarize and expose for collection + handlers: + # default configuration for the stream handlers + config: + periods: 5 + max_deep_sample: 50 + modules: + # the keys at this level are unique identifiers + default_net: + type: net + udp_traffic: + type: net + config: + protocols: [ udp ] + metrics: + enable: + - top_ips + default_dns: + type: dns + config: + max_deep_sample: 75 + # time window analyzers + analyzers: + modules: + nx_attack: + type: dns_random_label + special_domain: + type: dns + # specify that the stream handler module requires >= specific version to be successfully applied + require_version: "1.0" + config: + # must match the available configuration options for this version of this stream handler + qname_suffix: .mydomain.com + metrics: + disable: + - top_qtypes + - top_udp_ports +``` + +### Standalone Command Line Example + +#### Standalone agent start up + +When running without Orb, the tap and the collection config can be passed directly to pktvisor. + +```shell +$ pktvisord --config taps.yaml --config collection-policy-anycast.yaml +``` + +They may also be combined into a single YAML file (the schemas will merge) and passed in with one `--config` option. + +The admin-api (or prometheus output, pktvisor-cli, etc) should then be used to collect the results manually. + +## Orb Concepts + +Orb moves most of the configuration to a central control plane. 
The only configuration that remains at the agent is the +Tap configuration (because it is host specific), and Vitals configuration (below). + +### Vitals and Selector Configurations + +Orb needs the ability to address the agents that it is controlling. It does this by matching Selectors with Vitals. + +#### Vitals + +orb-agent is told on startup what its Vitals are: these are arbitrary key value pairs which typically represent +information such as region, pop, and node type. + +`vitals.yaml` + +```yaml +version: "1.0" + +orb: + vitals: + region: EU + pop: ams02 + node_type: dns +``` + +#### vitals on orb-agent start up + +```shell +$ orb-agent --config vitals.yaml +``` + +#### combining vitals and taps on orb-agent start up + +Since both Taps and Vitals are necessary for orb-agent start up, you can pass both in via two separate config files: + +```shell +$ orb-agent --config taps.yaml --config vitals.yaml +``` + +Or instead combine them into a single file: + +`orb-agent.yaml` + +```yaml +version: "1.0" + +pktvisor: + taps: + anycast: + type: pcap + config: + iface: eth0 +orb: + vitals: + region: EU + pop: ams02 + node_type: dns +``` + +```shell +$ orb-agent --config orb-agent.yaml +``` + +### Orb Selectors + +Selectors are named configurations of arbitrary key value pairs which can match against the Vitals of the agents +available in the Orb ecosystem. They may be thought of as groups of agents. These names are referenced in Orb Policies. +pktvisord does not read this configuration or use this data; it is used only by orb-agent. This schema is found only in +the control plane, not on the command line or in files. + +```yaml +version: "1.0" + +orb: + selectors: + all_dns: + node_type: dns + eu_dns: + region: EU + node_type: dns +``` + +### Orb Sinks + +Orb includes a metric collection system. Sinks specify where to send the summarized metric data. pktvisord does not read +this configuration or use this data; it is used only by orb-agent. 
This schema is found only in the control plane, not +on the command line or in files. + +```yaml +version: "1.0" + +orb: + sinks: + default_prometheus: + type: prometheus_exporter + address: 0.0.0.0:9598 + default_namespace: service + my_s3: + type: aws_s3 + bucket: my-bucket + compression: gzip + region: us-east-1 +``` + +### Orb Policies + +An Orb policy ties together Selectors, a Collection Policy, and one or more Sinks. pktvisord does not read this +configuration or use this data; it is used only by orb-agent. This schema is found only in the control plane, not on the +command line or in files. + +orb-agent will be made aware of the collection policy and the sinks if this selector matches its vitals. In case of a +match, orb-agent will attempt to apply the collection policy to its pktvisord, and update the control plane about +success or failure. Upon success, the sink will be created. + +```yaml +version: "1.0" + +orb: + policy: + selectors: + - eu_dns + collection: anycast_dns + sinks: + - default_prometheus +``` + + + + + + + + + diff --git a/docs/images/pktvisor-grafana-screenshot1.png b/docs/images/pktvisor-grafana-screenshot1.png new file mode 100644 index 000000000..15eea55ae Binary files /dev/null and b/docs/images/pktvisor-grafana-screenshot1.png differ diff --git a/docs/images/pktvisor-grafana-screenshot2.png b/docs/images/pktvisor-grafana-screenshot2.png new file mode 100644 index 000000000..2453ff1d9 Binary files /dev/null and b/docs/images/pktvisor-grafana-screenshot2.png differ diff --git a/docs/images/pktvisor3-grafana-screenshot.png b/docs/images/pktvisor3-grafana-screenshot.png deleted file mode 100644 index df27b310f..000000000 Binary files a/docs/images/pktvisor3-grafana-screenshot.png and /dev/null differ diff --git a/golang/README.md b/golang/README.md new file mode 100644 index 000000000..7c6b3c0ec --- /dev/null +++ b/golang/README.md @@ -0,0 +1,7 @@ +# Golang Code + +This directory contains utilities for pktvisor written in Go. 
+ +* [pktvisor-cli](cmd/pktvisor-cli/main.go) - A command line utility for observing pktvisor metric output on a node ( + local or remote) directly in a terminal + diff --git a/golang/cmd/pktvisor-cli/main.go b/golang/cmd/pktvisor-cli/main.go index c992d94b6..0c6615524 100644 --- a/golang/cmd/pktvisor-cli/main.go +++ b/golang/cmd/pktvisor-cli/main.go @@ -50,7 +50,7 @@ Options: flag.Parse() if *wantVersion { - fmt.Println(client.VizerVersionNum) + fmt.Println(client.VisorVersionNum) return } if *wantHelp { @@ -381,7 +381,7 @@ func layout(g *gocui.Gui) error { if err != gocui.ErrUnknownView { return err } - v.Title = fmt.Sprintf("pktvisor-cli (client: %s | server: %s)", client.VizerVersionNum, serverVersion) + v.Title = fmt.Sprintf("pktvisor-cli (client: %s | server: %s)", client.VisorVersionNum, serverVersion) } //if currentView == "main" { diff --git a/golang/pkg/client/version.go.in b/golang/pkg/client/version.go.in index 41627ec02..833c08d07 100644 --- a/golang/pkg/client/version.go.in +++ b/golang/pkg/client/version.go.in @@ -5,7 +5,7 @@ package client var ( - VizerVersionNum = "${VIZER_VERSION_NUM}" - VizerVersion = "${VIZER_VERSION}" + VisorVersionNum = "${VISOR_VERSION_NUM}" + VisorVersion = "${VISOR_VERSION}" ) diff --git a/integration_tests/CMakeLists.txt b/integration_tests/CMakeLists.txt index 7ad847299..63169655e 100644 --- a/integration_tests/CMakeLists.txt +++ b/integration_tests/CMakeLists.txt @@ -6,16 +6,16 @@ set(PCAP_BINARY ${CMAKE_BINARY_DIR}/bin/pktvisor-pcap) set(INT_SH ${CMAKE_SOURCE_DIR}/integration_tests/integration.sh) set(WORKING_DIR ${CMAKE_SOURCE_DIR}/integration_tests) -macro(vizer_int_test name) +macro(visor_int_test name) add_test(NAME ${name} WORKING_DIRECTORY ${WORKING_DIR} COMMAND ${INT_SH} ${PCAP_BINARY} ${TEMPLATE_DIR}/${name} -- ${HOST_VAR} --geo-city ${FIXTURE_DIR}/GeoIP2-City-Test.mmdb --geo-asn ${FIXTURE_DIR}/GeoIP2-ISP-Test.mmdb ${FIXTURE_DIR}/${name}.pcap) endmacro() -vizer_int_test(dns_ipv4_udp) 
-vizer_int_test(dns_ipv4_tcp) -vizer_int_test(dns_ipv6_udp) -vizer_int_test(dns_ipv6_tcp) +visor_int_test(dns_ipv4_udp) +visor_int_test(dns_ipv4_tcp) +visor_int_test(dns_ipv6_udp) +visor_int_test(dns_ipv6_tcp) # this allows local, non-public integration tests (for example, on private pcap data) #add_test(NAME external-tests diff --git a/integration_tests/window-schema.json b/integration_tests/window-schema.json new file mode 100644 index 000000000..1f1cc5257 --- /dev/null +++ b/integration_tests/window-schema.json @@ -0,0 +1,2369 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "$id": "http://example.com/example.json", + "type": "object", + "title": "The root schema", + "description": "The root schema comprises the entire JSON document.", + "default": {}, + "required": [ + "5m" + ], + "properties": { + "5m": { + "$id": "#/properties/5m", + "type": "object", + "title": "The 5m schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "required": [ + "dns", + "packets" + ], + "properties": { + "dns": { + "$id": "#/properties/5m/properties/dns", + "type": "object", + "title": "The dns schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "required": [ + "cardinality", + "period", + "top_nxdomain", + "top_qname2", + "top_qname3", + "top_qtype", + "top_rcode", + "top_refused", + "top_srvfail", + "top_udp_ports", + "wire_packets", + "xact" + ], + "properties": { + "cardinality": { + "$id": "#/properties/5m/properties/dns/properties/cardinality", + "type": "object", + "title": "The cardinality schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "qname": 190550 + } + ], + "required": [ + "qname" + ], + "properties": { + "qname": { + "$id": "#/properties/5m/properties/dns/properties/cardinality/properties/qname", + "type": "integer", + "title": "The qname schema", + "description": "An explanation about the 
purpose of this instance.", + "default": 0, + "examples": [ + 190550 + ] + } + }, + "additionalProperties": false + }, + "period": { + "$id": "#/properties/5m/properties/dns/properties/period", + "type": "object", + "title": "The period schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "length": 247, + "start_ts": 1616503565 + } + ], + "required": [ + "length", + "start_ts" + ], + "properties": { + "length": { + "$id": "#/properties/5m/properties/dns/properties/period/properties/length", + "type": "integer", + "title": "The length schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 247 + ] + }, + "start_ts": { + "$id": "#/properties/5m/properties/dns/properties/period/properties/start_ts", + "type": "integer", + "title": "The start_ts schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1616503565 + ] + } + }, + "additionalProperties": false + }, + "top_nxdomain": { + "$id": "#/properties/5m/properties/dns/properties/top_nxdomain", + "type": "array", + "title": "The top_nxdomain schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 354, + "name": "domain" + }, + { + "estimate": 309, + "name": "domain" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_nxdomain/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_nxdomain/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 354, + "name": "domain" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": 
"#/properties/5m/properties/dns/properties/top_nxdomain/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 354 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_nxdomain/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "domain" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_qname2": { + "$id": "#/properties/5m/properties/dns/properties/top_qname2", + "type": "array", + "title": "The top_qname2 schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 108037, + "name": "domain" + }, + { + "estimate": 90135, + "name": "domain" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_qname2/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_qname2/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 108037, + "name": "domain" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_qname2/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 108037 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_qname2/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + 
"domain" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_qname3": { + "$id": "#/properties/5m/properties/dns/properties/top_qname3", + "type": "array", + "title": "The top_qname3 schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 49680, + "name": "domain" + }, + { + "estimate": 36165, + "name": "domain" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_qname3/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_qname3/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 49680, + "name": "domain" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_qname3/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 49680 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_qname3/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "domain" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_qtype": { + "$id": "#/properties/5m/properties/dns/properties/top_qtype", + "type": "array", + "title": "The top_qtype schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 1205079, + "name": "A" + }, + { + "estimate": 442934, + "name": "AAAA" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_qtype/items", + "anyOf": [ + 
{ + "$id": "#/properties/5m/properties/dns/properties/top_qtype/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 1205079, + "name": "A" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_qtype/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1205079 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_qtype/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "A" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_rcode": { + "$id": "#/properties/5m/properties/dns/properties/top_rcode", + "type": "array", + "title": "The top_rcode schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 875401, + "name": "NOERROR" + }, + { + "estimate": 51947, + "name": "NXDOMAIN" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_rcode/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_rcode/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 875401, + "name": "NOERROR" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_rcode/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An 
explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 875401 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_rcode/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "NOERROR" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_refused": { + "$id": "#/properties/5m/properties/dns/properties/top_refused", + "type": "array", + "title": "The top_refused schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 2384, + "name": "domain" + }, + { + "estimate": 1492, + "name": "domain" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_refused/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_refused/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 2384, + "name": "domain" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_refused/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2384 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_refused/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "domain" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_srvfail": { + "$id": "#/properties/5m/properties/dns/properties/top_srvfail", + "type": "array", 
+ "title": "The top_srvfail schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 36, + "name": "domain" + }, + { + "estimate": 14, + "name": "domain" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_srvfail/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_srvfail/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 36, + "name": "domain" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_srvfail/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 36 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_srvfail/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "domain" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_udp_ports": { + "$id": "#/properties/5m/properties/dns/properties/top_udp_ports", + "type": "array", + "title": "The top_udp_ports schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 2229, + "name": "35880" + }, + { + "estimate": 2209, + "name": "10868" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_udp_ports/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_udp_ports/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An 
explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 2229, + "name": "35880" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_udp_ports/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2229 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_udp_ports/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "35880" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "wire_packets": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets", + "type": "object", + "title": "The wire_packets schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "deep_samples": 1880774, + "ipv4": 2527127, + "ipv6": 1196818, + "noerror": 1734409, + "nxdomain": 103299, + "queries": 1862389, + "rates": { + "total": { + "p50": 14629, + "p90": 15719, + "p95": 15765, + "p99": 15851 + } + }, + "refused": 23754, + "replies": 1861556, + "srvfail": 94, + "tcp": 1468, + "total": 3723945, + "udp": 3722477 + } + ], + "required": [ + "deep_samples", + "ipv4", + "ipv6", + "noerror", + "nxdomain", + "queries", + "refused", + "replies", + "srvfail", + "tcp", + "total", + "udp" + ], + "properties": { + "deep_samples": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/deep_samples", + "type": "integer", + "title": "The deep_samples schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1880774 + ] + }, + "ipv4": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/ipv4", + 
"type": "integer", + "title": "The ipv4 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2527127 + ] + }, + "ipv6": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/ipv6", + "type": "integer", + "title": "The ipv6 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1196818 + ] + }, + "noerror": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/noerror", + "type": "integer", + "title": "The noerror schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1734409 + ] + }, + "nxdomain": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/nxdomain", + "type": "integer", + "title": "The nxdomain schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 103299 + ] + }, + "queries": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/queries", + "type": "integer", + "title": "The queries schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1862389 + ] + }, + "rates": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/rates", + "type": "object", + "title": "The rates schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "total": { + "p50": 14629, + "p90": 15719, + "p95": 15765, + "p99": 15851 + } + } + ], + "required": [ + "total" + ], + "properties": { + "total": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/rates/properties/total", + "type": "object", + "title": "The total schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "p50": 14629, + "p90": 15719, + "p95": 15765, + "p99": 
15851 + } + ], + "required": [ + "p50", + "p90", + "p95", + "p99" + ], + "properties": { + "p50": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/rates/properties/total/properties/p50", + "type": "integer", + "title": "The p50 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 14629 + ] + }, + "p90": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/rates/properties/total/properties/p90", + "type": "integer", + "title": "The p90 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 15719 + ] + }, + "p95": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/rates/properties/total/properties/p95", + "type": "integer", + "title": "The p95 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 15765 + ] + }, + "p99": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/rates/properties/total/properties/p99", + "type": "integer", + "title": "The p99 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 15851 + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + "refused": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/refused", + "type": "integer", + "title": "The refused schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 23754 + ] + }, + "replies": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/replies", + "type": "integer", + "title": "The replies schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1861556 + ] + }, + "srvfail": { + "$id": 
"#/properties/5m/properties/dns/properties/wire_packets/properties/srvfail", + "type": "integer", + "title": "The srvfail schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 94 + ] + }, + "tcp": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/tcp", + "type": "integer", + "title": "The tcp schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1468 + ] + }, + "total": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/total", + "type": "integer", + "title": "The total schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 3723945 + ] + }, + "udp": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/udp", + "type": "integer", + "title": "The udp schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 3722477 + ] + } + }, + "additionalProperties": false + }, + "xact": { + "$id": "#/properties/5m/properties/dns/properties/xact", + "type": "object", + "title": "The xact schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "counts": { + "timed_out": 742, + "total": 1861473 + }, + "in": { + "quantiles_us": { + "p50": 155, + "p90": 500, + "p95": 826, + "p99": 5853 + }, + "top_slow": [ + { + "estimate": 6590, + "name": "domain" + }, + { + "estimate": 3466, + "name": "domain" + }, + { + "estimate": 3306, + "name": "domain" + }, + { + "estimate": 3074, + "name": "domain" + }, + { + "estimate": 2673, + "name": "domain" + }, + { + "estimate": 1411, + "name": "domain" + }, + { + "estimate": 1344, + "name": "domain" + }, + { + "estimate": 1181, + "name": "domain" + }, + { + "estimate": 998, + "name": "domain" + }, + { + "estimate": 911, + "name": "domain" + } + ], + "total": 1849925 + }, + "out": { + 
"quantiles_us": { + "p50": 274, + "p90": 2981, + "p95": 4301, + "p99": 27711 + }, + "top_slow": [ + { + "estimate": 15, + "name": "domain" + }, + { + "estimate": 15, + "name": "domain" + }, + { + "estimate": 14, + "name": "domain" + }, + { + "estimate": 13, + "name": "domain" + }, + { + "estimate": 12, + "name": "domain" + }, + { + "estimate": 12, + "name": "domain" + }, + { + "estimate": 11, + "name": "domain" + }, + { + "estimate": 10, + "name": "domain" + }, + { + "estimate": 9, + "name": "domain" + }, + { + "estimate": 8, + "name": "domain" + } + ], + "total": 11548 + } + } + ], + "required": [ + "counts", + "in", + "out" + ], + "properties": { + "counts": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/counts", + "type": "object", + "title": "The counts schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "timed_out": 742, + "total": 1861473 + } + ], + "required": [ + "timed_out", + "total" + ], + "properties": { + "timed_out": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/counts/properties/timed_out", + "type": "integer", + "title": "The timed_out schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 742 + ] + }, + "total": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/counts/properties/total", + "type": "integer", + "title": "The total schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1861473 + ] + } + }, + "additionalProperties": false + }, + "in": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in", + "type": "object", + "title": "The in schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "quantiles_us": { + "p50": 155, + "p90": 500, + "p95": 826, + "p99": 5853 + }, + "top_slow": [ + { + "estimate": 6590, + 
"name": "domain" + }, + { + "estimate": 3466, + "name": "domain" + }, + { + "estimate": 3306, + "name": "domain" + }, + { + "estimate": 3074, + "name": "domain" + }, + { + "estimate": 2673, + "name": "domain" + }, + { + "estimate": 1411, + "name": "domain" + }, + { + "estimate": 1344, + "name": "domain" + }, + { + "estimate": 1181, + "name": "domain" + }, + { + "estimate": 998, + "name": "domain" + }, + { + "estimate": 911, + "name": "domain" + } + ], + "total": 1849925 + } + ], + "required": [ + "quantiles_us", + "top_slow", + "total" + ], + "properties": { + "quantiles_us": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/quantiles_us", + "type": "object", + "title": "The quantiles_us schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "p50": 155, + "p90": 500, + "p95": 826, + "p99": 5853 + } + ], + "required": [ + "p50", + "p90", + "p95", + "p99" + ], + "properties": { + "p50": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/quantiles_us/properties/p50", + "type": "integer", + "title": "The p50 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 155 + ] + }, + "p90": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/quantiles_us/properties/p90", + "type": "integer", + "title": "The p90 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 500 + ] + }, + "p95": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/quantiles_us/properties/p95", + "type": "integer", + "title": "The p95 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 826 + ] + }, + "p99": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/quantiles_us/properties/p99", + "type": 
"integer", + "title": "The p99 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 5853 + ] + } + }, + "additionalProperties": false + }, + "top_slow": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/top_slow", + "type": "array", + "title": "The top_slow schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 6590, + "name": "domain" + }, + { + "estimate": 3466, + "name": "domain" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/top_slow/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/top_slow/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 6590, + "name": "domain" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/top_slow/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 6590 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/top_slow/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "domain" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "total": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/total", + "type": "integer", + "title": "The total schema", + "description": "An explanation about the purpose of 
this instance.", + "default": 0, + "examples": [ + 1849925 + ] + } + }, + "additionalProperties": false + }, + "out": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out", + "type": "object", + "title": "The out schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "quantiles_us": { + "p50": 274, + "p90": 2981, + "p95": 4301, + "p99": 27711 + }, + "top_slow": [ + { + "estimate": 15, + "name": "domain" + }, + { + "estimate": 15, + "name": "domain" + }, + { + "estimate": 14, + "name": "domain" + }, + { + "estimate": 13, + "name": "domain" + }, + { + "estimate": 12, + "name": "domain" + }, + { + "estimate": 12, + "name": "domain" + }, + { + "estimate": 11, + "name": "domain" + }, + { + "estimate": 10, + "name": "domain" + }, + { + "estimate": 9, + "name": "domain" + }, + { + "estimate": 8, + "name": "domain" + } + ], + "total": 11548 + } + ], + "required": [ + "quantiles_us", + "top_slow", + "total" + ], + "properties": { + "quantiles_us": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us", + "type": "object", + "title": "The quantiles_us schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "p50": 274, + "p90": 2981, + "p95": 4301, + "p99": 27711 + } + ], + "required": [ + "p50", + "p90", + "p95", + "p99" + ], + "properties": { + "p50": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us/properties/p50", + "type": "integer", + "title": "The p50 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 274 + ] + }, + "p90": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us/properties/p90", + "type": "integer", + "title": "The p90 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, 
+ "examples": [ + 2981 + ] + }, + "p95": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us/properties/p95", + "type": "integer", + "title": "The p95 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 4301 + ] + }, + "p99": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us/properties/p99", + "type": "integer", + "title": "The p99 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 27711 + ] + } + }, + "additionalProperties": false + }, + "top_slow": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/top_slow", + "type": "array", + "title": "The top_slow schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 15, + "name": "domain" + }, + { + "estimate": 15, + "name": "domain" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/top_slow/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/top_slow/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 15, + "name": "domain" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/top_slow/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 15 + ] + }, + "name": { + "$id": 
"#/properties/5m/properties/dns/properties/xact/properties/out/properties/top_slow/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "domain" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "total": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/total", + "type": "integer", + "title": "The total schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 11548 + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + "packets": { + "$id": "#/properties/5m/properties/packets", + "type": "object", + "title": "The packets schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "cardinality": { + "dst_ips_out": 26319, + "src_ips_in": 26373 + }, + "deep_samples": 1884688, + "in": 1877981, + "ipv4": 2531766, + "ipv6": 1200127, + "other_l4": 25, + "out": 1853912, + "period": { + "length": 247, + "start_ts": 1616503565 + }, + "rates": { + "pps_in": { + "p50": 7383, + "p90": 7929, + "p95": 7945, + "p99": 7983 + }, + "pps_out": { + "p50": 7288, + "p90": 7828, + "p95": 7845, + "p99": 7880 + }, + "pps_total": { + "p50": 14665, + "p90": 15740, + "p95": 15792, + "p99": 15852 + } + }, + "tcp": 8268, + "top_ASN": [ + { + "estimate": 245448, + "name": "7018/ATT-INTERNET4" + }, + { + "estimate": 224905, + "name": "15169/GOOGLE" + }, + { + "estimate": 117379, + "name": "7922/COMCAST-7922" + }, + { + "estimate": 113333, + "name": "20057/ATT-MOBILITY-LLC-AS20057" + }, + { + "estimate": 109717, + "name": "36692/OPENDNS" + }, + { + "estimate": 90290, + "name": "20940/Akamai International B.V." 
+ }, + { + "estimate": 83381, + "name": "20115/CHARTER-20115" + }, + { + "estimate": 64620, + "name": "13335/CLOUDFLARENET" + }, + { + "estimate": 51054, + "name": "11426/TWC-11426-CAROLINAS" + }, + { + "estimate": 43789, + "name": "28573/CLARO S.A." + } + ], + "top_geoLoc": [ + { + "estimate": 589169, + "name": "NA/United States/GA/Atlanta" + }, + { + "estimate": 196890, + "name": "NA/United States" + }, + { + "estimate": 145328, + "name": "NA/United States/FL/Miami" + }, + { + "estimate": 115193, + "name": "NA/United States/GA/Tucker" + }, + { + "estimate": 70181, + "name": "NA/United States/GA/Marietta" + }, + { + "estimate": 52291, + "name": "NA/United States/NC/Charlotte" + }, + { + "estimate": 45892, + "name": "NA/United States/TN/Nashville" + }, + { + "estimate": 41336, + "name": "SA/Brazil" + }, + { + "estimate": 33813, + "name": "NA/United States/FL/Jacksonville" + }, + { + "estimate": 31838, + "name": "SA/Ecuador/G/Guayaquil" + } + ], + "top_ipv4": [ + { + "estimate": 6172, + "name": "66.32.254.0" + }, + { + "estimate": 5650, + "name": "163.114.202.0" + }, + { + "estimate": 5520, + "name": "163.114.202.0" + }, + { + "estimate": 4425, + "name": "107.127.34.0" + }, + { + "estimate": 4320, + "name": "107.127.34.0" + }, + { + "estimate": 4236, + "name": "141.207.147.0" + }, + { + "estimate": 4207, + "name": "107.127.34.0" + }, + { + "estimate": 4058, + "name": "107.127.34.0" + }, + { + "estimate": 3925, + "name": "68.114.47.0" + }, + { + "estimate": 3787, + "name": "141.207.151.0" + } + ], + "top_ipv6": [ + { + "estimate": 11769, + "name": "2a04:e4c0:24::67" + }, + { + "estimate": 11189, + "name": "2a04:e4c0:24::69" + }, + { + "estimate": 11146, + "name": "2a04:e4c0:24::68" + }, + { + "estimate": 10984, + "name": "2a04:e4c0:24::64" + }, + { + "estimate": 10975, + "name": "2a04:e4c0:24::65" + }, + { + "estimate": 10846, + "name": "2a04:e4c0:24::66" + }, + { + "estimate": 10644, + "name": "2a04:e4c0:24::71" + }, + { + "estimate": 10402, + "name": 
"2a04:e4c0:24::72" + }, + { + "estimate": 10297, + "name": "2a04:e4c0:24::70" + }, + { + "estimate": 10162, + "name": "2a04:e4c0:24::73" + } + ], + "total": 3731893, + "udp": 3723600 + } + ], + "required": [ + "cardinality", + "deep_samples", + "in", + "ipv4", + "ipv6", + "other_l4", + "out", + "period", + "tcp", + "top_ASN", + "top_geoLoc", + "top_ipv4", + "top_ipv6", + "total", + "udp" + ], + "properties": { + "cardinality": { + "$id": "#/properties/5m/properties/packets/properties/cardinality", + "type": "object", + "title": "The cardinality schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "dst_ips_out": 26319, + "src_ips_in": 26373 + } + ], + "required": [ + "dst_ips_out", + "src_ips_in" + ], + "properties": { + "dst_ips_out": { + "$id": "#/properties/5m/properties/packets/properties/cardinality/properties/dst_ips_out", + "type": "integer", + "title": "The dst_ips_out schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 26319 + ] + }, + "src_ips_in": { + "$id": "#/properties/5m/properties/packets/properties/cardinality/properties/src_ips_in", + "type": "integer", + "title": "The src_ips_in schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 26373 + ] + } + }, + "additionalProperties": false + }, + "deep_samples": { + "$id": "#/properties/5m/properties/packets/properties/deep_samples", + "type": "integer", + "title": "The deep_samples schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1884688 + ] + }, + "in": { + "$id": "#/properties/5m/properties/packets/properties/in", + "type": "integer", + "title": "The in schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1877981 + ] + }, + "ipv4": { + "$id": 
"#/properties/5m/properties/packets/properties/ipv4", + "type": "integer", + "title": "The ipv4 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2531766 + ] + }, + "ipv6": { + "$id": "#/properties/5m/properties/packets/properties/ipv6", + "type": "integer", + "title": "The ipv6 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1200127 + ] + }, + "other_l4": { + "$id": "#/properties/5m/properties/packets/properties/other_l4", + "type": "integer", + "title": "The other_l4 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 25 + ] + }, + "out": { + "$id": "#/properties/5m/properties/packets/properties/out", + "type": "integer", + "title": "The out schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1853912 + ] + }, + "period": { + "$id": "#/properties/5m/properties/packets/properties/period", + "type": "object", + "title": "The period schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "length": 247, + "start_ts": 1616503565 + } + ], + "required": [ + "length", + "start_ts" + ], + "properties": { + "length": { + "$id": "#/properties/5m/properties/packets/properties/period/properties/length", + "type": "integer", + "title": "The length schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 247 + ] + }, + "start_ts": { + "$id": "#/properties/5m/properties/packets/properties/period/properties/start_ts", + "type": "integer", + "title": "The start_ts schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1616503565 + ] + } + }, + "additionalProperties": false + }, + "rates": { + "$id": "#/properties/5m/properties/packets/properties/rates", + 
"type": "object", + "title": "The rates schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "pps_in": { + "p50": 7383, + "p90": 7929, + "p95": 7945, + "p99": 7983 + }, + "pps_out": { + "p50": 7288, + "p90": 7828, + "p95": 7845, + "p99": 7880 + }, + "pps_total": { + "p50": 14665, + "p90": 15740, + "p95": 15792, + "p99": 15852 + } + } + ], + "required": [ + "pps_in", + "pps_out", + "pps_total" + ], + "properties": { + "pps_in": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_in", + "type": "object", + "title": "The pps_in schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "p50": 7383, + "p90": 7929, + "p95": 7945, + "p99": 7983 + } + ], + "required": [ + "p50", + "p90", + "p95", + "p99" + ], + "properties": { + "p50": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_in/properties/p50", + "type": "integer", + "title": "The p50 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 7383 + ] + }, + "p90": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_in/properties/p90", + "type": "integer", + "title": "The p90 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 7929 + ] + }, + "p95": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_in/properties/p95", + "type": "integer", + "title": "The p95 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 7945 + ] + }, + "p99": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_in/properties/p99", + "type": "integer", + "title": "The p99 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 7983 + ] + } + }, + 
"additionalProperties": false + }, + "pps_out": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_out", + "type": "object", + "title": "The pps_out schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "p50": 7288, + "p90": 7828, + "p95": 7845, + "p99": 7880 + } + ], + "required": [ + "p50", + "p90", + "p95", + "p99" + ], + "properties": { + "p50": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_out/properties/p50", + "type": "integer", + "title": "The p50 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 7288 + ] + }, + "p90": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_out/properties/p90", + "type": "integer", + "title": "The p90 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 7828 + ] + }, + "p95": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_out/properties/p95", + "type": "integer", + "title": "The p95 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 7845 + ] + }, + "p99": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_out/properties/p99", + "type": "integer", + "title": "The p99 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 7880 + ] + } + }, + "additionalProperties": false + }, + "pps_total": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_total", + "type": "object", + "title": "The pps_total schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "p50": 14665, + "p90": 15740, + "p95": 15792, + "p99": 15852 + } + ], + "required": [ + "p50", + "p90", + "p95", + "p99" + ], + "properties": { 
+ "p50": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_total/properties/p50", + "type": "integer", + "title": "The p50 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 14665 + ] + }, + "p90": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_total/properties/p90", + "type": "integer", + "title": "The p90 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 15740 + ] + }, + "p95": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_total/properties/p95", + "type": "integer", + "title": "The p95 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 15792 + ] + }, + "p99": { + "$id": "#/properties/5m/properties/packets/properties/rates/properties/pps_total/properties/p99", + "type": "integer", + "title": "The p99 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 15852 + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + "tcp": { + "$id": "#/properties/5m/properties/packets/properties/tcp", + "type": "integer", + "title": "The tcp schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 8268 + ] + }, + "top_ASN": { + "$id": "#/properties/5m/properties/packets/properties/top_ASN", + "type": "array", + "title": "The top_ASN schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 245448, + "name": "7018/ATT-INTERNET4" + }, + { + "estimate": 224905, + "name": "15169/GOOGLE" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/packets/properties/top_ASN/items", + "anyOf": [ + { + "$id": 
"#/properties/5m/properties/packets/properties/top_ASN/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 245448, + "name": "7018/ATT-INTERNET4" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/packets/properties/top_ASN/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 245448 + ] + }, + "name": { + "$id": "#/properties/5m/properties/packets/properties/top_ASN/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "7018/ATT-INTERNET4" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_geoLoc": { + "$id": "#/properties/5m/properties/packets/properties/top_geoLoc", + "type": "array", + "title": "The top_geoLoc schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 589169, + "name": "NA/United States/GA/Atlanta" + }, + { + "estimate": 196890, + "name": "NA/United States" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/packets/properties/top_geoLoc/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/packets/properties/top_geoLoc/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 589169, + "name": "NA/United States/GA/Atlanta" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": 
"#/properties/5m/properties/packets/properties/top_geoLoc/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 589169 + ] + }, + "name": { + "$id": "#/properties/5m/properties/packets/properties/top_geoLoc/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "NA/United States/GA/Atlanta" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_ipv4": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4", + "type": "array", + "title": "The top_ipv4 schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 6172, + "name": "66.32.254.0" + }, + { + "estimate": 5650, + "name": "163.114.202.0" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 6172, + "name": "66.32.254.0" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 6172 + ] + }, + "name": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this 
instance.", + "default": "", + "examples": [ + "66.32.254.0" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_ipv6": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv6", + "type": "array", + "title": "The top_ipv6 schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 11769, + "name": "2a04:e4c0:24::67" + }, + { + "estimate": 11189, + "name": "2a04:e4c0:24::69" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv6/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/packets/properties/top_ipv6/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 11769, + "name": "2a04:e4c0:24::67" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv6/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 11769 + ] + }, + "name": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv6/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "2a04:e4c0:24::67" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "total": { + "$id": "#/properties/5m/properties/packets/properties/total", + "type": "integer", + "title": "The total schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 3731893 + ] + }, + "udp": { + "$id": "#/properties/5m/properties/packets/properties/udp", + "type": "integer", + "title": "The udp 
schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 3723600 + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false +} \ No newline at end of file diff --git a/reporting/README.md b/reporting/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/reporting/pktvisor_prometheus/.gitignore b/reporting/pktvisor_prometheus/.gitignore deleted file mode 100644 index 2a9e2964b..000000000 --- a/reporting/pktvisor_prometheus/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# Go -pktvisor_exporter - -# Files -.DS_Store - -# Editor swap files -*.swp -*.swo -*.swn diff --git a/reporting/pktvisor_prometheus/Dockerfile b/reporting/pktvisor_prometheus/Dockerfile deleted file mode 100644 index f6ff22773..000000000 --- a/reporting/pktvisor_prometheus/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM golang as builder - -RUN GIT_TERMINAL_PROMPT=0 git clone --depth 1 -b master --single-branch https://github.com/ns1/pktvisor.git $GOPATH/src/github.com/ns1/pktvisor \ - && cd $GOPATH/src/github.com/ns1/pktvisor/reporting/pktvisor_prometheus \ - && go get \ - && go build -a -installsuffix cgo -ldflags="-w -s" -o /go/bin/pktvisor_exporter - -# -------------------------------------------------------------------------------- - -FROM ubuntu:18.04 -COPY --from=builder /go/bin/pktvisor_exporter /usr/bin/pktvisor_exporter - -EXPOSE 9998 - -ENTRYPOINT [ "/usr/bin/pktvisor_exporter" ] diff --git a/reporting/pktvisor_prometheus/README.md b/reporting/pktvisor_prometheus/README.md deleted file mode 100644 index 9b30f25b9..000000000 --- a/reporting/pktvisor_prometheus/README.md +++ /dev/null @@ -1,9 +0,0 @@ -### Prometheus exporter for pktvisor - -### Local Deployment - -* Launch exporter: - -```bash -$ pktvisor_exporter -log.level=debug -``` diff --git a/reporting/pktvisor_prometheus/pktvisor/client.go b/reporting/pktvisor_prometheus/pktvisor/client.go 
deleted file mode 100644 index 29af42f5c..000000000 --- a/reporting/pktvisor_prometheus/pktvisor/client.go +++ /dev/null @@ -1,223 +0,0 @@ -package pktvisor - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "time" - - "github.com/prometheus/common/log" -) - -type NameCount struct { - Name string `json:"name"` - Estimate int64 `json:"estimate"` -} - -type AppSnapshot struct { - App struct { - DeepSampleRatePct int64 `json:"deep_sample_rate_pct"` - Periods int64 `json:"periods"` - SingleSummary bool `json:"single_summary"` - UpTimeMin float64 `json:"up_time_min"` - Version string `json:"version"` - } `json:"app"` - Dns struct { - Xact struct { - Open int64 `json:"open"` - } `json:"xact"` - } `json:"dns"` -} - -type StatSnapshot struct { - DNS struct { - WirePackets struct { - Ipv4 int64 `json:"ipv4"` - Ipv6 int64 `json:"ipv6"` - Queries int64 `json:"queries"` - Replies int64 `json:"replies"` - Tcp int64 `json:"tcp"` - Total int64 `json:"total"` - Udp int64 `json:"udp"` - NoError int64 `json:"noerror"` - NxDomain int64 `json:"nxdomain"` - SrvFail int64 `json:"srvfail"` - Refused int64 `json:"refused"` - } `json:"wire_packets"` - Cardinality struct { - Qname int64 `json:"qname"` - } `json:"cardinality"` - Xact struct { - Counts struct { - Total int64 `json:"total"` - } `json:"counts"` - In struct { - QuantilesUS struct { - P50 int64 `json:"p50"` - P90 int64 `json:"p90"` - P95 int64 `json:"p95"` - P99 int64 `json:"p99"` - } `json:"quantiles_us"` - TopSlow []NameCount `json:"top_slow"` - Total int64 `json:"total"` - } `json:"in"` - Out struct { - QuantilesUS struct { - P50 int64 `json:"p50"` - P90 int64 `json:"p90"` - P95 int64 `json:"p95"` - P99 int64 `json:"p99"` - } `json:"quantiles_us"` - TopSlow []NameCount `json:"top_slow"` - Total int64 `json:"total"` - } `json:"out"` - } `json:"xact"` - TopQname2 []NameCount `json:"top_qname2"` - TopQname3 []NameCount `json:"top_qname3"` - TopNX []NameCount `json:"top_nxdomain"` - TopQtype []NameCount 
`json:"top_qtype"` - TopRcode []NameCount `json:"top_rcode"` - TopREFUSED []NameCount `json:"top_refused"` - TopSRVFAIL []NameCount `json:"top_srvfail"` - TopUDPPorts []NameCount `json:"top_udp_ports"` - } `json:"dns"` - Packets struct { - Cardinality struct { - DstIpsOut int64 `json:"dst_ips_out"` - SrcIpsIn int64 `json:"src_ips_in"` - } `json:"cardinality"` - Ipv4 int64 `json:"ipv4"` - Ipv6 int64 `json:"ipv6"` - Tcp int64 `json:"tcp"` - Total int64 `json:"total"` - Udp int64 `json:"udp"` - In int64 `json:"in"` - Out int64 `json:"out"` - OtherL4 int64 `json:"other_l4"` - DeepSamples int64 `json:"deep_samples"` - Rates struct { - Pps_in struct { - P50 int64 `json:"p50"` - P90 int64 `json:"p90"` - P95 int64 `json:"p95"` - P99 int64 `json:"p99"` - } `json:"pps_in"` - Pps_out struct { - P50 int64 `json:"p50"` - P90 int64 `json:"p90"` - P95 int64 `json:"p95"` - P99 int64 `json:"p99"` - } `json:"pps_out"` - } `json:"rates"` - TopIpv4 []NameCount `json:"top_ipv4"` - TopIpv6 []NameCount `json:"top_ipv6"` - TopGeoLoc []NameCount `json:"top_geoLoc"` - TopASN []NameCount `json:"top_asn"` - } `json:"packets"` - Period struct { - StartTS int64 `json:"start_ts"` - Length int64 `json:"length"` - } `json:"period"` -} - -type Client struct { - Timeout time.Duration - ConnectionRetries int -} - -func NewClient(timeout time.Duration, connectionRetries int) *Client { - return &Client{time.Duration(timeout), connectionRetries} -} - -// getResponse collects an individual http.response and returns a *Response -func (c Client) getResponse(url string) ([]byte, error) { - - log.Debugf("Fetching %s \n", url) - - resp, err := c.getHTTPResponse(url) // do this earlier - - if err != nil { - return nil, fmt.Errorf("Error converting body to byte array: %v", err) - } - - // Read the body to a byte array so it can be used elsewhere - body, err := ioutil.ReadAll(resp.Body) - - defer resp.Body.Close() - - if err != nil { - return nil, fmt.Errorf("Error converting body to byte array: %v", err) - } - 
- return body, nil -} - -// getHTTPResponse handles the http client creation, token setting and returns the *http.response -func (c Client) getHTTPResponse(url string) (*http.Response, error) { - - client := &http.Client{ - Timeout: c.Timeout, - } - - req, err := http.NewRequest("GET", url, nil) - - if err != nil { - return nil, fmt.Errorf("Failed to create http request: %v", err) - } - - var retries = c.ConnectionRetries - for retries > 0 { - resp, err := client.Do(req) - if err != nil { - retries -= 1 - - if retries == 0 { - return nil, err - } else { - log.Infof("Retrying HTTP request %s", url) - } - } else { - return resp, nil - } - } - return nil, nil -} - -func (c *Client) GetAppStats(host string, port string) (AppSnapshot, error) { - var rawStats AppSnapshot - var emptyStats AppSnapshot - - url := fmt.Sprintf("http://%s:%s/api/v1/metrics/app", host, port) - - data, readErr := c.getResponse(url) - if readErr != nil { - return emptyStats, readErr - } - - err := json.Unmarshal(data, &rawStats) - if err != nil { - return emptyStats, err - } - - return rawStats, nil -} - -func (c *Client) GetBucketStats(host string, port string, duration int) (StatSnapshot, error) { - var rawStats map[string]StatSnapshot - var emptyStats StatSnapshot - - url := fmt.Sprintf("http://%s:%s/api/v1/metrics/bucket/%d", host, port, duration) - - data, readErr := c.getResponse(url) - if readErr != nil { - return emptyStats, readErr - } - - err := json.Unmarshal(data, &rawStats) - if err != nil { - return emptyStats, err - } - - return rawStats["1m"], nil -} diff --git a/reporting/pktvisor_prometheus/pktvisor_exporter.go b/reporting/pktvisor_prometheus/pktvisor_exporter.go deleted file mode 100644 index 24549a7b8..000000000 --- a/reporting/pktvisor_prometheus/pktvisor_exporter.go +++ /dev/null @@ -1,639 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "net/http" - _ "net/http/pprof" - "os" - "time" - - "github.com/ns1/pktvisor/reporting/pktvisor_prometheus/pktvisor" - 
"github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/common/log" - prom_version "github.com/prometheus/common/version" -) - -const ( - namespace = "pktvisor" -) - -var ( - app_period = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "app_period"), - "Length of period to capture (s)", - nil, nil, - ) - app_uptime_min = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "app_uptime_min"), - "Uptime (minutes).", - []string{"version"}, nil, - ) - dns_wire_packets_ipv4 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_ipv4"), - "WirePackets IPv4", - nil, nil, - ) - dns_wire_packets_ipv6 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_ipv6"), - "WirePackets IPv6", - nil, nil, - ) - dns_wire_packets_queries = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_queries"), - "WirePackets Queries", - nil, nil, - ) - dns_wire_packets_replies = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_replies"), - "WirePackets Replies", - nil, nil, - ) - dns_wire_packets_tcp = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_tcp"), - "WirePackets TCP", - nil, nil, - ) - dns_wire_packets_total = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_total"), - "WirePackets Total", - nil, nil, - ) - dns_wire_packets_udp = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_udp"), - "WirePackets UDP", - nil, nil, - ) - dns_wire_packets_noerror = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_noerror"), - "WirePackets NOERROR", - nil, nil, - ) - dns_wire_packets_nxdomain = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_nxdomain"), - "WirePackets NXDOMAIN", - nil, nil, - ) - dns_wire_packets_srvfail = prometheus.NewDesc( - 
prometheus.BuildFQName(namespace, "", "dns_wire_packets_srvfail"), - "WirePackets SRVFAIL", - nil, nil, - ) - dns_wire_packets_refused = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_wire_packets_refused"), - "WirePackets REFUSED", - nil, nil, - ) - - dns_cardinality_qname = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_cardinality_qname"), - "Cardinality QName", - nil, nil, - ) - - dns_xact_counts_total = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_counts_total"), - "Xact Counts Total", - nil, nil, - ) - dns_xact_in_quantiles_us_p50 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_in_quantiles_us_p50"), - "Xact In QuantilesUS P50", - nil, nil, - ) - dns_xact_in_quantiles_us_p90 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_in_quantiles_us_p90"), - "Xact In QuantilesUS P90", - nil, nil, - ) - dns_xact_in_quantiles_us_p95 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_in_quantiles_us_p95"), - "Xact In QuantilesUS P95", - nil, nil, - ) - dns_xact_in_quantiles_us_p99 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_in_quantiles_us_p99"), - "Xact In QuantilesUS P99", - nil, nil, - ) - dns_xact_in_top_slow = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_in_top_slow"), - "Xact In Top Slow", - []string{"name"}, nil, - ) - dns_xact_in_total = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_in_total"), - "Xact In Total", - nil, nil, - ) - dns_xact_out_quantiles_us_p50 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_out_quantiles_us_p50"), - "Xact Out QuantilesUS P50", - nil, nil, - ) - dns_xact_out_quantiles_us_p90 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_out_quantiles_us_p90"), - "Xact Out QuantilesUS P90", - nil, nil, - ) - dns_xact_out_quantiles_us_p95 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", 
"dns_xact_out_quantiles_us_p95"), - "Xact Out QuantilesUS P95", - nil, nil, - ) - dns_xact_out_quantiles_us_p99 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_out_quantiles_us_p99"), - "Xact Out QuantilesUS P99", - nil, nil, - ) - dns_xact_out_top_slow = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_out_top_slow"), - "Xact Out Top Slow", - []string{"name"}, nil, - ) - dns_xact_out_total = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_xact_out_total"), - "Xact Out Total", - nil, nil, - ) - - dns_top_qname2 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_top_qname2"), - "DNS Top Qname2", - []string{"name"}, nil, - ) - dns_top_qname3 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_top_qname3"), - "DNS Top Qname3", - []string{"name"}, nil, - ) - dns_top_nxdomain = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_top_nxdomain"), - "DNS Top NXDomain", - []string{"name"}, nil, - ) - dns_top_qtype = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_top_qtype"), - "DNS Top QType", - []string{"name"}, nil, - ) - dns_top_rcode = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_top_rcode"), - "DNS Top Rcode", - []string{"name"}, nil, - ) - dns_top_refused = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_top_refused"), - "DNS Top Refused", - []string{"name"}, nil, - ) - dns_top_srvfail = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_top_srvfail"), - "DNS Top SRVFail", - []string{"name"}, nil, - ) - dns_top_udp_ports = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "dns_top_udp_ports"), - "DNS Top UDPPorts", - []string{"name"}, nil, - ) - - packets_cardinality_dst_ips_out = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_cardinality_dst_ips_out"), - "Packets Cardinality DstIpsOut", - nil, nil, - ) - packets_cardinality_src_ips_in = prometheus.NewDesc( - 
prometheus.BuildFQName(namespace, "", "packets_cardinality_src_ips_in"), - "Packets Cardinality SrcIpsIn", - nil, nil, - ) - packets_ipv4 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_ipv4"), - "Packets IPv4", - nil, nil, - ) - packets_ipv6 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_ipv6"), - "Packets IPv6", - nil, nil, - ) - packets_tcp = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_tcp"), - "Packets TCP", - nil, nil, - ) - packets_total = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_total"), - "Packets Total", - nil, nil, - ) - packets_udp = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_udp"), - "Packets UDP", - nil, nil, - ) - packets_in = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_in"), - "Packets In", - nil, nil, - ) - packets_out = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_out"), - "Packets Out", - nil, nil, - ) - packets_other_l4 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_other_l4"), - "Packets Other L4", - nil, nil, - ) - packets_deep_samples = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_deep_samples"), - "Packets Deep Samples", - nil, nil, - ) - - packets_rates_pps_in_p50 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_rates_pps_in_p50"), - "Packets Rates PPS In P50", - nil, nil, - ) - packets_rates_pps_in_p90 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_rates_pps_in_p90"), - "Packets Rates PPS In P90", - nil, nil, - ) - packets_rates_pps_in_p95 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_rates_pps_in_p95"), - "Packets Rates PPS In P95", - nil, nil, - ) - packets_rates_pps_in_p99 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_rates_pps_in_p99"), - "Packets Rates PPS In P99", - nil, nil, - ) - packets_rates_pps_out_p50 = 
prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_rates_pps_out_p50"), - "Packets Rates PPS Out P50", - nil, nil, - ) - packets_rates_pps_out_p90 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_rates_pps_out_p90"), - "Packets Rates PPS Out P90", - nil, nil, - ) - packets_rates_pps_out_p95 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_rates_pps_out_p95"), - "Packets Rates PPS Out P95", - nil, nil, - ) - packets_rates_pps_out_p99 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_rates_pps_out_p99"), - "Packets Rates PPS Out P99", - nil, nil, - ) - - packets_top_ipv4 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_top_ipv4"), - "Packets Top IPv4", - []string{"name"}, nil, - ) - packets_top_ipv6 = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_top_ipv6"), - "Packets Top IPv6", - []string{"name"}, nil, - ) - packets_top_geoloc = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_top_geoloc"), - "Packets Top GeoLoc", - []string{"name"}, nil, - ) - packets_top_asn = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "packets_top_asn"), - "Packets Top ASN", - []string{"name"}, nil, - ) - - period_start_ts = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "period_start_ts"), - "Period Start TS", - nil, nil, - ) - period_length = prometheus.NewDesc( - prometheus.BuildFQName(namespace, "", "period_length"), - "Period Length", - nil, nil, - ) -) - -// Exporter collects Pktvisor stats from the given server and exports them using -// the prometheus metrics package. -type Exporter struct { - Client *pktvisor.Client - PktvisorHost string - PktvisorPort string - Period int -} - -// NewExporter returns an initialized Exporter. 
-func NewExporter(pktvisorHost string, pktvisorPort string, period int) (*Exporter, error) { - client := pktvisor.NewClient(time.Duration(10)*time.Second, 5) - log.Debugln("Init exporter") - return &Exporter{ - Client: client, - PktvisorHost: pktvisorHost, - PktvisorPort: pktvisorPort, - Period: period, - }, nil -} - -// Describe describes all the metrics ever exported by the Pktvisor exporter. -// It implements prometheus.Collector. -func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { - ch <- app_period - ch <- app_uptime_min - ch <- dns_wire_packets_ipv4 - ch <- dns_wire_packets_ipv6 - ch <- dns_wire_packets_queries - ch <- dns_wire_packets_replies - ch <- dns_wire_packets_tcp - ch <- dns_wire_packets_total - ch <- dns_wire_packets_udp - ch <- dns_wire_packets_noerror - ch <- dns_wire_packets_nxdomain - ch <- dns_wire_packets_srvfail - ch <- dns_wire_packets_refused - ch <- dns_cardinality_qname - ch <- dns_xact_counts_total - ch <- dns_xact_in_quantiles_us_p50 - ch <- dns_xact_in_quantiles_us_p90 - ch <- dns_xact_in_quantiles_us_p95 - ch <- dns_xact_in_quantiles_us_p99 - ch <- dns_xact_in_top_slow - ch <- dns_xact_in_total - ch <- dns_xact_out_quantiles_us_p50 - ch <- dns_xact_out_quantiles_us_p90 - ch <- dns_xact_out_quantiles_us_p95 - ch <- dns_xact_out_quantiles_us_p99 - ch <- dns_xact_out_top_slow - ch <- dns_xact_out_total - ch <- dns_top_qname2 - ch <- dns_top_qname3 - ch <- dns_top_nxdomain - ch <- dns_top_qtype - ch <- dns_top_rcode - ch <- dns_top_refused - ch <- dns_top_srvfail - ch <- dns_top_udp_ports - ch <- packets_cardinality_dst_ips_out - ch <- packets_cardinality_src_ips_in - ch <- packets_ipv4 - ch <- packets_ipv6 - ch <- packets_tcp - ch <- packets_total - ch <- packets_udp - ch <- packets_in - ch <- packets_out - ch <- packets_other_l4 - ch <- packets_deep_samples - ch <- packets_rates_pps_in_p50 - ch <- packets_rates_pps_in_p90 - ch <- packets_rates_pps_in_p95 - ch <- packets_rates_pps_in_p99 - ch <- packets_rates_pps_out_p50 - ch <- 
packets_rates_pps_out_p90 - ch <- packets_rates_pps_out_p95 - ch <- packets_rates_pps_out_p99 - ch <- packets_top_ipv4 - ch <- packets_top_ipv6 - ch <- packets_top_geoloc - ch <- packets_top_asn - ch <- period_start_ts - ch <- period_length -} - -// Collect fetches the stats from pktvisord service and deliver them as Prometheus Metrics. -// It implements prometheus.Collector. -func (e *Exporter) Collect(ch chan<- prometheus.Metric) { - if e.Client == nil { - log.Errorf("Pktvisor client not configured.") - return - } - - appMetrics, err := e.Client.GetAppStats(e.PktvisorHost, e.PktvisorPort) - if err != nil { - log.Errorf("Can't get app stats") - fmt.Println(err) - } - ch <- prometheus.MustNewConstMetric(app_period, prometheus.GaugeValue, float64(appMetrics.App.Periods)) - ch <- prometheus.MustNewConstMetric(app_uptime_min, prometheus.GaugeValue, float64(appMetrics.App.UpTimeMin), appMetrics.App.Version) - - windowMetrics, err := e.Client.GetBucketStats(e.PktvisorHost, e.PktvisorPort, e.Period) - if err != nil { - log.Errorf("Can't get window stats") - fmt.Println(err) - } - ch <- prometheus.MustNewConstMetric(dns_wire_packets_ipv4, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.Ipv4)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_ipv6, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.Ipv6)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_queries, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.Queries)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_replies, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.Replies)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_tcp, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.Tcp)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_total, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.Total)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_udp, prometheus.GaugeValue, 
float64(windowMetrics.DNS.WirePackets.Udp)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_noerror, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.NoError)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_nxdomain, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.NxDomain)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_srvfail, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.SrvFail)) - ch <- prometheus.MustNewConstMetric(dns_wire_packets_refused, prometheus.GaugeValue, float64(windowMetrics.DNS.WirePackets.Refused)) - ch <- prometheus.MustNewConstMetric(dns_cardinality_qname, prometheus.GaugeValue, float64(windowMetrics.DNS.Cardinality.Qname)) - ch <- prometheus.MustNewConstMetric(dns_xact_counts_total, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.Counts.Total)) - ch <- prometheus.MustNewConstMetric(dns_xact_in_quantiles_us_p50, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.In.QuantilesUS.P50)) - ch <- prometheus.MustNewConstMetric(dns_xact_in_quantiles_us_p90, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.In.QuantilesUS.P90)) - ch <- prometheus.MustNewConstMetric(dns_xact_in_quantiles_us_p95, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.In.QuantilesUS.P95)) - ch <- prometheus.MustNewConstMetric(dns_xact_in_quantiles_us_p99, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.In.QuantilesUS.P99)) - for _, nc := range windowMetrics.DNS.Xact.In.TopSlow { - ch <- prometheus.MustNewConstMetric( - dns_xact_in_top_slow, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - ch <- prometheus.MustNewConstMetric(dns_xact_in_total, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.In.Total)) - ch <- prometheus.MustNewConstMetric(dns_xact_out_quantiles_us_p50, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.Out.QuantilesUS.P50)) - ch <- prometheus.MustNewConstMetric(dns_xact_out_quantiles_us_p90, prometheus.GaugeValue, 
float64(windowMetrics.DNS.Xact.Out.QuantilesUS.P90)) - ch <- prometheus.MustNewConstMetric(dns_xact_out_quantiles_us_p95, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.Out.QuantilesUS.P95)) - ch <- prometheus.MustNewConstMetric(dns_xact_out_quantiles_us_p99, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.Out.QuantilesUS.P99)) - for _, nc := range windowMetrics.DNS.Xact.Out.TopSlow { - ch <- prometheus.MustNewConstMetric( - dns_xact_out_top_slow, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - ch <- prometheus.MustNewConstMetric(dns_xact_out_total, prometheus.GaugeValue, float64(windowMetrics.DNS.Xact.Out.Total)) - - for _, nc := range windowMetrics.DNS.TopQname2 { - ch <- prometheus.MustNewConstMetric( - dns_top_qname2, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.DNS.TopQname3 { - ch <- prometheus.MustNewConstMetric( - dns_top_qname3, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.DNS.TopNX { - ch <- prometheus.MustNewConstMetric( - dns_top_nxdomain, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.DNS.TopQtype { - ch <- prometheus.MustNewConstMetric( - dns_top_qtype, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.DNS.TopRcode { - ch <- prometheus.MustNewConstMetric( - dns_top_rcode, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.DNS.TopREFUSED { - ch <- prometheus.MustNewConstMetric( - dns_top_refused, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.DNS.TopSRVFAIL { - ch <- prometheus.MustNewConstMetric( - dns_top_srvfail, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.DNS.TopUDPPorts { - ch <- prometheus.MustNewConstMetric( - 
dns_top_udp_ports, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - - ch <- prometheus.MustNewConstMetric(packets_cardinality_dst_ips_out, prometheus.GaugeValue, float64(windowMetrics.Packets.Cardinality.DstIpsOut)) - ch <- prometheus.MustNewConstMetric(packets_cardinality_src_ips_in, prometheus.GaugeValue, float64(windowMetrics.Packets.Cardinality.SrcIpsIn)) - ch <- prometheus.MustNewConstMetric(packets_ipv4, prometheus.GaugeValue, float64(windowMetrics.Packets.Ipv4)) - ch <- prometheus.MustNewConstMetric(packets_ipv6, prometheus.GaugeValue, float64(windowMetrics.Packets.Ipv6)) - ch <- prometheus.MustNewConstMetric(packets_tcp, prometheus.GaugeValue, float64(windowMetrics.Packets.Tcp)) - ch <- prometheus.MustNewConstMetric(packets_total, prometheus.GaugeValue, float64(windowMetrics.Packets.Total)) - ch <- prometheus.MustNewConstMetric(packets_udp, prometheus.GaugeValue, float64(windowMetrics.Packets.Udp)) - ch <- prometheus.MustNewConstMetric(packets_in, prometheus.GaugeValue, float64(windowMetrics.Packets.In)) - ch <- prometheus.MustNewConstMetric(packets_out, prometheus.GaugeValue, float64(windowMetrics.Packets.Out)) - ch <- prometheus.MustNewConstMetric(packets_other_l4, prometheus.GaugeValue, float64(windowMetrics.Packets.OtherL4)) - ch <- prometheus.MustNewConstMetric(packets_deep_samples, prometheus.GaugeValue, float64(windowMetrics.Packets.DeepSamples)) - ch <- prometheus.MustNewConstMetric(packets_rates_pps_in_p50, prometheus.GaugeValue, float64(windowMetrics.Packets.Rates.Pps_in.P50)) - ch <- prometheus.MustNewConstMetric(packets_rates_pps_in_p90, prometheus.GaugeValue, float64(windowMetrics.Packets.Rates.Pps_in.P90)) - ch <- prometheus.MustNewConstMetric(packets_rates_pps_in_p95, prometheus.GaugeValue, float64(windowMetrics.Packets.Rates.Pps_in.P95)) - ch <- prometheus.MustNewConstMetric(packets_rates_pps_in_p99, prometheus.GaugeValue, float64(windowMetrics.Packets.Rates.Pps_in.P99)) - ch <- 
prometheus.MustNewConstMetric(packets_rates_pps_out_p50, prometheus.GaugeValue, float64(windowMetrics.Packets.Rates.Pps_out.P50)) - ch <- prometheus.MustNewConstMetric(packets_rates_pps_out_p90, prometheus.GaugeValue, float64(windowMetrics.Packets.Rates.Pps_out.P90)) - ch <- prometheus.MustNewConstMetric(packets_rates_pps_out_p95, prometheus.GaugeValue, float64(windowMetrics.Packets.Rates.Pps_out.P95)) - ch <- prometheus.MustNewConstMetric(packets_rates_pps_out_p99, prometheus.GaugeValue, float64(windowMetrics.Packets.Rates.Pps_out.P99)) - ch <- prometheus.MustNewConstMetric(period_start_ts, prometheus.GaugeValue, float64(windowMetrics.Period.StartTS)) - ch <- prometheus.MustNewConstMetric(period_length, prometheus.GaugeValue, float64(windowMetrics.Period.Length)) - - for _, nc := range windowMetrics.Packets.TopIpv4 { - ch <- prometheus.MustNewConstMetric( - packets_top_ipv4, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.Packets.TopIpv6 { - ch <- prometheus.MustNewConstMetric( - packets_top_ipv6, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.Packets.TopGeoLoc { - ch <- prometheus.MustNewConstMetric( - packets_top_geoloc, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - for _, nc := range windowMetrics.Packets.TopASN { - ch <- prometheus.MustNewConstMetric( - packets_top_asn, - prometheus.GaugeValue, - float64(nc.Estimate), - nc.Name, - ) - } - log.Infof("Pktvisor exporter finished") -} - -func init() { - prometheus.MustRegister(prom_version.NewCollector("pktvisor_exporter")) -} - -func main() { - var ( - listenAddress = flag.String("web.listen-address", ":9998", "Address to listen on for web interface and telemetry.") - metricsPath = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.") - pktvisorHost = flag.String("pktvisor.host", "127.0.0.1", "Pktvisor server host") - pktvisorPort = 
flag.String("pktvisor.port", "10853", "Pktvisor server port") - period = flag.Int("period", 1, "Bucket period to collect") - ) - flag.Parse() - - log.Infoln("Starting pktvisor exporter", prom_version.Info()) - log.Infoln("Build context", prom_version.BuildContext()) - - exporter, err := NewExporter(*pktvisorHost, *pktvisorPort, *period) - if err != nil { - log.Errorf("Can't create exporter : %s", err) - os.Exit(1) - } - log.Infoln("Register exporter") - prometheus.MustRegister(exporter) - - http.Handle(*metricsPath, promhttp.Handler()) - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(` - Pktvisor Exporter - -

Pktvisor Exporter

-

Metrics

- - `)) - }) - - log.Infoln("Listening on", *listenAddress) - log.Fatal(http.ListenAndServe(*listenAddress, nil)) -} diff --git a/reporting/pktvisor_prometheus/pktvisor_exporter_test.go b/reporting/pktvisor_prometheus/pktvisor_exporter_test.go deleted file mode 100644 index 06ab7d0f9..000000000 --- a/reporting/pktvisor_prometheus/pktvisor_exporter_test.go +++ /dev/null @@ -1 +0,0 @@ -package main diff --git a/reporting/pktvisor_prometheus/pktvisor_prometheus b/reporting/pktvisor_prometheus/pktvisor_prometheus deleted file mode 100755 index 12d8efb49..000000000 Binary files a/reporting/pktvisor_prometheus/pktvisor_prometheus and /dev/null differ diff --git a/src/AbstractManager.h b/src/AbstractManager.h index 523691a98..e45f690b0 100644 --- a/src/AbstractManager.h +++ b/src/AbstractManager.h @@ -11,7 +11,7 @@ #include #include -namespace vizer { +namespace visor { /** * called from HTTP threads so must be thread safe diff --git a/src/AbstractMetricsManager.h b/src/AbstractMetricsManager.h index c8a5a24f1..23ed7409f 100644 --- a/src/AbstractMetricsManager.h +++ b/src/AbstractMetricsManager.h @@ -4,12 +4,7 @@ #pragma once -#include -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wold-style-cast" -#pragma GCC diagnostic ignored "-Wunused-parameter" -#include -#pragma GCC diagnostic pop +#include #include #include #include @@ -18,11 +13,13 @@ #pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant" #include #pragma GCC diagnostic pop +#include "Metrics.h" #include #include #include #include -namespace vizer { + +namespace visor { using json = nlohmann::json; @@ -41,99 +38,6 @@ class PeriodException : public std::runtime_error using namespace std::chrono; -class Rate -{ -public: - typedef datasketches::kll_sketch QuantileType; - -private: - std::atomic_uint64_t _counter; - std::atomic_uint64_t _rate; - mutable std::shared_mutex _sketch_mutex; - QuantileType _quantile; - - std::shared_ptr _timer_handle; - high_resolution_clock::time_point _last_ts; 
- -public: - Rate() - : _counter(0) - , _rate(0.0) - , _quantile() - { - _quantile = QuantileType(); - _last_ts = high_resolution_clock::now(); - // all rates use a single static timer object which holds its own thread - // the tick argument determines the granularity of job running and canceling - static timer timer_thread{100ms}; - _timer_handle = timer_thread.set_interval(1s, [this] { - _rate.store(_counter.exchange(0)); - // lock mutex for write - std::unique_lock lock(_sketch_mutex); - _quantile.update(_rate); - }); - } - - ~Rate() - { - _timer_handle->cancel(); - } - - /** - * stop rate collection, ie. expect no more counter updates. - * does not affect the quantiles - in effect, it makes the rate read only - * must be thread safe - */ - void cancel() - { - _timer_handle->cancel(); - _rate.store(0); - _counter.store(0); - } - - Rate &operator++() - { - inc_counter(); - return *this; - } - - void inc_counter() - { - _counter.fetch_add(1, std::memory_order_relaxed); - } - - uint64_t counter() const - { - return _counter; - } - - uint64_t rate() const - { - return _rate; - } - - auto quantile_get_rlocked() const - { - std::shared_lock lock(_sketch_mutex); - struct retVals { - const QuantileType *quantile; - std::shared_lock lock; - }; - return retVals{&_quantile, std::move(lock)}; - } - - void merge(const Rate &other) - { - auto [o_quantile, o_lock] = other.quantile_get_rlocked(); - std::unique_lock w_lock(_sketch_mutex); - _quantile.merge(*o_quantile); - // the live rate to simply copied if non zero - if (other._rate != 0) { - _rate.store(other._rate, std::memory_order_relaxed); - } - } -}; - /** * This class should be specialized to contain metrics and sketches specific to this handler * It *MUST* be thread safe, and should expect mostly writes. 
@@ -142,8 +46,8 @@ class AbstractMetricsBucket { private: mutable std::shared_mutex _base_mutex; - uint64_t _num_samples = 0; - uint64_t _num_events = 0; + Counter _num_samples; + Counter _num_events; Rate _rate_events; @@ -151,6 +55,7 @@ class AbstractMetricsBucket timespec _end_tstamp; uint _period_length = 0; bool _read_only = false; + bool _recorded_stream = false; protected: // merge the metrics of the specialized metric bucket @@ -162,7 +67,9 @@ class AbstractMetricsBucket public: AbstractMetricsBucket() - : _rate_events() + : _num_samples("base", {"deep_samples"}, "Total number of deep samples") + , _num_events("base", {"total"}, "Total number of events") + , _rate_events("base", {"event_rate"}, "Rate of events") , _start_tstamp{0, 0} , _end_tstamp{0, 0} { @@ -186,7 +93,7 @@ class AbstractMetricsBucket uint period_length() const { std::shared_lock r_lock(_base_mutex); - if (_read_only) { + if (_read_only || _recorded_stream) { return _period_length; } timespec now; @@ -218,15 +125,43 @@ class AbstractMetricsBucket on_set_read_only(); } - auto event_data() const + bool recorded_stream() const + { + std::shared_lock r_lock(_base_mutex); + return _recorded_stream; + } + + void set_recorded_stream() + { + std::unique_lock w_lock(_base_mutex); + _recorded_stream = true; + } + + void set_event_rate_info(std::string schema_key, std::initializer_list names, const std::string &desc) + { + _rate_events.set_info(schema_key, names, desc); + } + + void set_num_sample_info(std::string schema_key, std::initializer_list names, const std::string &desc) + { + _num_samples.set_info(schema_key, names, desc); + } + + void set_num_events_info(std::string schema_key, std::initializer_list names, const std::string &desc) + { + _num_events.set_info(schema_key, names, desc); + } + + auto event_data_locked() const { - std::shared_lock lock(_base_mutex); struct eventData { - uint64_t num_events; - uint64_t num_samples; + const Counter *num_events; + const Counter *num_samples; const 
Rate *event_rate; + std::shared_lock r_lock; }; - return eventData{_num_events, _num_samples, &_rate_events}; + std::shared_lock lock(_base_mutex); + return eventData{&_num_events, &_num_samples, &_rate_events, std::move(lock)}; } void merge(const AbstractMetricsBucket &other) @@ -243,7 +178,6 @@ class AbstractMetricsBucket if (other._end_tstamp.tv_sec > _end_tstamp.tv_sec) { _end_tstamp.tv_sec = other._end_tstamp.tv_sec; } - _read_only = true; _rate_events.merge(other._rate_events); } specialized_merge(other); @@ -254,13 +188,14 @@ class AbstractMetricsBucket // note, currently not enforcing _read_only ++_rate_events; std::unique_lock lock(_base_mutex); - _num_events++; + ++_num_events; if (deep) { - _num_samples++; + ++_num_samples; } } virtual void to_json(json &j) const = 0; + virtual void to_prometheus(std::stringstream &out) const = 0; }; template @@ -289,6 +224,11 @@ class AbstractMetricsManager protected: std::atomic_bool _deep_sampling_now; // atomic so we can reference without mutex + /** + * indicates if the stream we are processing was pre recorded, not live + */ + bool _recorded_stream = false; + private: /** * window maintenance @@ -315,6 +255,9 @@ class AbstractMetricsManager // this changes the live bucket _metric_buckets.emplace_front(std::make_unique()); _metric_buckets[0]->set_start_tstamp(stamp); + if (_recorded_stream) { + _metric_buckets[0]->set_recorded_stream(); + } // notify second most recent bucket that it is now read only, save end time _metric_buckets[1]->set_read_only(stamp); // if we're at our period history length max, pop the oldest @@ -338,7 +281,6 @@ class AbstractMetricsManager static const uint MERGE_CACHE_TTL_MS = 1000; protected: - /** * the "base" event method that should be called on every event before specialized event functionality. 
sampling will be * chosen, and the time window will be maintained @@ -461,6 +403,14 @@ class AbstractMetricsManager _metric_buckets.front()->set_read_only(stamp); } + void set_recorded_stream() + { + std::unique_lock wl(_base_mutex); + std::shared_lock rl(_bucket_mutex); + _recorded_stream = true; + _metric_buckets.front()->set_recorded_stream(); + } + const MetricsBucketClass *bucket(uint64_t period) const { std::shared_lock rl(_bucket_mutex); @@ -500,6 +450,25 @@ class AbstractMetricsManager _metric_buckets.at(period)->to_json(j[period_str][key]); } + void window_single_prometheus(std::stringstream &out, uint64_t period = 0) const + { + std::shared_lock rl(_base_mutex); + std::shared_lock rbl(_bucket_mutex); + + if (period >= _num_periods) { + std::stringstream err; + err << "invalid metrics period, specify [0, " << _num_periods - 1 << "]"; + throw PeriodException(err.str()); + } + if (period >= _metric_buckets.size()) { + std::stringstream err; + err << "requested metrics period has not yet accumulated, current range is [0, " << _metric_buckets.size() - 1 << "]"; + throw PeriodException(err.str()); + } + + _metric_buckets.at(period)->to_prometheus(out); + } + void window_merged_json(json &j, const std::string &key, uint64_t period) const { std::shared_lock rl(_base_mutex); @@ -524,6 +493,9 @@ class AbstractMetricsManager } MetricsBucketClass merged; + if (_recorded_stream) { + merged.set_recorded_stream(); + } auto p = period; for (auto &m : _metric_buckets) { diff --git a/src/AbstractModule.h b/src/AbstractModule.h index b5d6e7fc0..6423f3483 100644 --- a/src/AbstractModule.h +++ b/src/AbstractModule.h @@ -12,7 +12,7 @@ #include #include -namespace vizer { +namespace visor { using json = nlohmann::json; diff --git a/src/AbstractPlugin.cpp b/src/AbstractPlugin.cpp index 46ccac514..04c055d1a 100644 --- a/src/AbstractPlugin.cpp +++ b/src/AbstractPlugin.cpp @@ -6,7 +6,7 @@ #include #include -namespace vizer { +namespace visor { void 
AbstractPlugin::_check_schema(json obj, SchemaMap &required, SchemaMap &optional) { diff --git a/src/AbstractPlugin.h b/src/AbstractPlugin.h index b4bd9cfa1..a292b2f42 100644 --- a/src/AbstractPlugin.h +++ b/src/AbstractPlugin.h @@ -11,7 +11,7 @@ #include #include -namespace vizer { +namespace visor { using json = nlohmann::json; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 449a18b15..b5fce816d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,48 +1,44 @@ -message(STATUS "vizer-core") +message(STATUS "visor-core") find_package(Corrade REQUIRED PluginManager) -pkg_check_modules(LIBMMDB REQUIRED libmaxminddb) set_directory_properties(PROPERTIES CORRADE_USE_PEDANTIC_FLAGS ON) -add_library(vizer-core +add_library(visor-core AbstractPlugin.cpp InputModulePlugin.cpp HandlerModulePlugin.cpp GeoDB.cpp CoreServer.cpp - ) -add_library(Vizer::Core ALIAS vizer-core) + Metrics.cpp Metrics.h) +add_library(Visor::Core ALIAS visor-core) -target_include_directories(vizer-core +target_include_directories(visor-core PRIVATE - ${CMAKE_BINARY_DIR}/src # Vizer::Core config.h - PUBLIC - ${LIBMMDB_INCLUDE_DIRS} + ${CMAKE_BINARY_DIR}/src # Visor::Core config.h INTERFACE $ ) -target_link_libraries(vizer-core +target_link_libraries(visor-core PUBLIC datasketches rng timer + maxminddb ${CONAN_LIBS_CORRADE} ${CONAN_LIBS_SPDLOG} ${CONAN_LIBS_FMT} - ${LIBMMDB_LDFLAGS} - ${LIBMMDB_LIBRARIES} - ${VIZER_STATIC_PLUGINS} + ${VISOR_STATIC_PLUGINS} ) -configure_file(vizer_config.h.in vizer_config.h @ONLY) +configure_file(visor_config.h.in visor_config.h @ONLY) add_subdirectory(inputs) add_subdirectory(handlers) -set(VIZER_STATIC_PLUGINS ${VIZER_STATIC_PLUGINS} PARENT_SCOPE) +set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} PARENT_SCOPE) ## TEST SUITE add_executable(unit-tests-vizor-core @@ -56,9 +52,9 @@ target_include_directories(unit-tests-vizor-core PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ) -target_link_libraries(unit-tests-vizor-core PRIVATE Vizer::Core) 
+target_link_libraries(unit-tests-vizor-core PRIVATE Visor::Core) add_test(NAME unit-tests-vizor-core - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/tests + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src COMMAND unit-tests-vizor-core ) \ No newline at end of file diff --git a/src/CoreServer.cpp b/src/CoreServer.cpp index fd79f4a12..8490ff101 100644 --- a/src/CoreServer.cpp +++ b/src/CoreServer.cpp @@ -3,12 +3,13 @@ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ #include "CoreServer.h" -#include "vizer_config.h" +#include "Metrics.h" +#include "visor_config.h" #include #include #include -vizer::CoreServer::CoreServer(bool read_only, std::shared_ptr logger) +visor::CoreServer::CoreServer(bool read_only, std::shared_ptr logger, const PrometheusConfig &prom_config) : _svr(read_only) , _logger(logger) , _start_time(std::chrono::system_clock::now()) @@ -36,9 +37,12 @@ vizer::CoreServer::CoreServer(bool read_only, std::shared_ptr lo _handler_plugins.emplace_back(std::move(mod)); } - _setup_routes(); + _setup_routes(prom_config); + if (!prom_config.instance.empty()) { + Metric::add_base_label("instance", prom_config.instance); + } } -void vizer::CoreServer::start(const std::string &host, int port) +void visor::CoreServer::start(const std::string &host, int port) { if (!_svr.bind_to_port(host.c_str(), port)) { throw std::runtime_error("unable to bind host/port"); @@ -48,7 +52,7 @@ void vizer::CoreServer::start(const std::string &host, int port) throw std::runtime_error("error during listen"); } } -void vizer::CoreServer::stop() +void visor::CoreServer::stop() { _svr.stop(); @@ -68,11 +72,11 @@ void vizer::CoreServer::stop() } } } -vizer::CoreServer::~CoreServer() +visor::CoreServer::~CoreServer() { stop(); } -void vizer::CoreServer::_setup_routes() +void visor::CoreServer::_setup_routes(const PrometheusConfig &prom_config) { _logger->info("Initialize server control plane"); @@ -87,7 +91,7 @@ void vizer::CoreServer::_setup_routes() _svr.Get("/api/v1/metrics/app", 
[&]([[maybe_unused]] const httplib::Request &req, httplib::Response &res) { json j; try { - j["app"]["version"] = VIZER_VERSION_NUM; + j["app"]["version"] = VISOR_VERSION_NUM; j["app"]["up_time_min"] = float(std::chrono::duration_cast(std::chrono::system_clock::now() - _start_time).count()) / 60; res.set_content(j.dump(), "text/json"); } catch (const std::exception &e) { @@ -158,4 +162,25 @@ void vizer::CoreServer::_setup_routes() res.set_content(e.what(), "text/plain"); } }); + if (!prom_config.path.empty()) { + _logger->info("enabling prometheus metrics on: {}", prom_config.path); + _svr.Get(prom_config.path.c_str(), [&]([[maybe_unused]] const httplib::Request &req, httplib::Response &res) { + std::stringstream output; + try { + auto [handler_modules, hm_lock] = _handler_manager->module_get_all_locked(); + for (auto &[name, mod] : handler_modules) { + auto hmod = dynamic_cast(mod.get()); + if (hmod) { + spdlog::stopwatch sw; + hmod->window_prometheus(output); + _logger->debug("{} elapsed time: {}", hmod->name(), sw); + } + } + res.set_content(output.str(), "text/plain"); + } catch (const std::exception &e) { + res.status = 500; + res.set_content(e.what(), "text/plain"); + } + }); + } } diff --git a/src/CoreServer.h b/src/CoreServer.h index 818b4be07..66c34ce20 100644 --- a/src/CoreServer.h +++ b/src/CoreServer.h @@ -14,7 +14,12 @@ #include #include -namespace vizer { +namespace visor { + +struct PrometheusConfig { + std::string path; + std::string instance; +}; class CoreServer { @@ -31,7 +36,7 @@ class CoreServer HandlerPluginRegistry _handler_registry; std::vector _handler_plugins; - vizer::HttpServer _svr; + visor::HttpServer _svr; std::unique_ptr _input_manager; std::unique_ptr _handler_manager; @@ -39,10 +44,10 @@ class CoreServer std::shared_ptr _logger; std::chrono::system_clock::time_point _start_time; - void _setup_routes(); + void _setup_routes(const PrometheusConfig &prom_config); public: - CoreServer(bool read_only, std::shared_ptr logger); + 
CoreServer(bool read_only, std::shared_ptr logger, const PrometheusConfig &prom_config); ~CoreServer(); void start(const std::string &host, int port); diff --git a/src/GeoDB.cpp b/src/GeoDB.cpp index 92346856d..e78ae5274 100644 --- a/src/GeoDB.cpp +++ b/src/GeoDB.cpp @@ -6,7 +6,7 @@ #include #include -namespace vizer::geo { +namespace visor::geo { MaxmindDB &GeoIP() { diff --git a/src/GeoDB.h b/src/GeoDB.h index 9300473e8..ec6fd2c92 100644 --- a/src/GeoDB.h +++ b/src/GeoDB.h @@ -4,10 +4,13 @@ #pragma once +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpedantic" #include +#pragma GCC diagnostic pop #include -namespace vizer::geo { +namespace visor::geo { class MaxmindDB { diff --git a/src/HandlerManager.h b/src/HandlerManager.h index 936905cc8..6bce028b0 100644 --- a/src/HandlerManager.h +++ b/src/HandlerManager.h @@ -7,7 +7,7 @@ #include "AbstractManager.h" #include "StreamHandler.h" -namespace vizer { +namespace visor { /** * called from HTTP threads so must be thread safe diff --git a/src/HandlerModulePlugin.cpp b/src/HandlerModulePlugin.cpp index 3f1c98dbd..8859c0d3f 100644 --- a/src/HandlerModulePlugin.cpp +++ b/src/HandlerModulePlugin.cpp @@ -6,7 +6,7 @@ #include #include -namespace vizer { +namespace visor { void HandlerModulePlugin::init_module(InputStreamManager *im, HandlerManager *hm, HttpServer &svr) diff --git a/src/HandlerModulePlugin.h b/src/HandlerModulePlugin.h index b79e9a39c..b568ff16e 100644 --- a/src/HandlerModulePlugin.h +++ b/src/HandlerModulePlugin.h @@ -9,20 +9,20 @@ #include "InputStreamManager.h" #include -namespace vizer { +namespace visor { class HandlerModulePlugin : public AbstractPlugin { protected: - vizer::InputStreamManager *_input_manager; - vizer::HandlerManager *_handler_manager; + visor::InputStreamManager *_input_manager; + visor::HandlerManager *_handler_manager; virtual void _setup_routes(HttpServer &svr) = 0; public: static std::string pluginInterface() { - return "dev.vizer.module.handler/1.0"; + return 
"dev.visor.module.handler/1.0"; } static std::vector pluginSearchPaths() diff --git a/src/HttpServer.h b/src/HttpServer.h index b57581bbc..b890818da 100644 --- a/src/HttpServer.h +++ b/src/HttpServer.h @@ -7,7 +7,7 @@ #include #include -namespace vizer { +namespace visor { class HttpServer : public httplib::Server { bool _read_only = true; @@ -20,7 +20,7 @@ class HttpServer : public httplib::Server Server &Get(const char *pattern, Handler handler) { - spdlog::info("Registering GET {}", pattern); + spdlog::get("pktvisor")->info("Registering GET {}", pattern); return httplib::Server::Get(pattern, handler); } Server &Post(const char *pattern, Handler handler) @@ -28,7 +28,7 @@ class HttpServer : public httplib::Server if (_read_only) { return *this; } - spdlog::info("Registering POST {}", pattern); + spdlog::get("pktvisor")->info("Registering POST {}", pattern); return httplib::Server::Post(pattern, handler); } Server &Put(const char *pattern, Handler handler) @@ -36,7 +36,7 @@ class HttpServer : public httplib::Server if (_read_only) { return *this; } - spdlog::info("Registering PUT {}", pattern); + spdlog::get("pktvisor")->info("Registering PUT {}", pattern); return httplib::Server::Put(pattern, handler); } Server &Delete(const char *pattern, Handler handler) @@ -44,7 +44,7 @@ class HttpServer : public httplib::Server if (_read_only) { return *this; } - spdlog::info("Registering DELETE {}", pattern); + spdlog::get("pktvisor")->info("Registering DELETE {}", pattern); return httplib::Server::Delete(pattern, handler); } }; diff --git a/src/InputModulePlugin.cpp b/src/InputModulePlugin.cpp index 3719eb354..3b571c788 100644 --- a/src/InputModulePlugin.cpp +++ b/src/InputModulePlugin.cpp @@ -6,7 +6,7 @@ #include #include -namespace vizer { +namespace visor { void InputModulePlugin::init_module(InputStreamManager *im, HttpServer &svr) { diff --git a/src/InputModulePlugin.h b/src/InputModulePlugin.h index 6da6e3fcc..079395a87 100644 --- a/src/InputModulePlugin.h +++ 
b/src/InputModulePlugin.h @@ -8,20 +8,20 @@ #include "InputStreamManager.h" #include -namespace vizer { +namespace visor { class InputModulePlugin : public AbstractPlugin { protected: - vizer::InputStreamManager *_input_manager; + visor::InputStreamManager *_input_manager; virtual void _setup_routes(HttpServer &svr) = 0; public: static std::string pluginInterface() { - return "dev.vizer.module.input/1.0"; + return "dev.visor.module.input/1.0"; } static std::vector pluginSearchPaths() diff --git a/src/InputStream.h b/src/InputStream.h index 48476cde0..19d5a6852 100644 --- a/src/InputStream.h +++ b/src/InputStream.h @@ -7,7 +7,7 @@ #include "AbstractModule.h" #include "StreamHandler.h" -namespace vizer { +namespace visor { class InputStream : public AbstractModule { diff --git a/src/InputStreamManager.h b/src/InputStreamManager.h index f0cd4e07b..7ce3fa0d4 100644 --- a/src/InputStreamManager.h +++ b/src/InputStreamManager.h @@ -7,7 +7,7 @@ #include "AbstractManager.h" #include "InputStream.h" -namespace vizer { +namespace visor { /** * called from HTTP threads so must be thread safe diff --git a/src/Metrics.cpp b/src/Metrics.cpp new file mode 100644 index 000000000..d67e8540c --- /dev/null +++ b/src/Metrics.cpp @@ -0,0 +1,141 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ + +#include "Metrics.h" +#include + +namespace visor { + +void Counter::to_json(json &j) const +{ + name_json_assign(j, _value); +} + +void Counter::to_prometheus(std::stringstream &out) const +{ + out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; + out << "# TYPE " << base_name_snake() << " gauge" << std::endl; + out << name_snake() << ' ' << _value << std::endl; +} + +void Rate::to_json(json &j, bool include_live) const +{ + to_json(j); + if (include_live) { + name_json_assign(j, {"live"}, rate()); + } +} + +void Rate::to_json(visor::json &j) const +{ + const double fractions[4]{0.50, 0.90, 0.95, 0.99}; + + std::shared_lock lock(_sketch_mutex); + + auto quantiles = _quantile.get_quantiles(fractions, 4); + if (quantiles.size()) { + name_json_assign(j, {"p50"}, quantiles[0]); + name_json_assign(j, {"p90"}, quantiles[1]); + name_json_assign(j, {"p95"}, quantiles[2]); + name_json_assign(j, {"p99"}, quantiles[3]); + } +} + +void Rate::to_prometheus(std::stringstream &out) const +{ + const double fractions[4]{0.50, 0.90, 0.95, 0.99}; + + std::shared_lock lock(_sketch_mutex); + auto quantiles = _quantile.get_quantiles(fractions, 4); + + if (quantiles.size()) { + out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; + out << "# TYPE " << base_name_snake() << " summary" << std::endl; + out << name_snake({}, {{"quantile", "0.5"}}) << ' ' << quantiles[0] << std::endl; + out << name_snake({}, {{"quantile", "0.9"}}) << ' ' << quantiles[1] << std::endl; + out << name_snake({}, {{"quantile", "0.95"}}) << ' ' << quantiles[2] << std::endl; + out << name_snake({}, {{"quantile", "0.99"}}) << ' ' << quantiles[3] << std::endl; + out << name_snake({"sum"}) << ' ' << _quantile.get_max_value() << std::endl; + out << name_snake({"count"}) << ' ' << _quantile.get_n() << std::endl; + } +} + +void Cardinality::merge(const Cardinality &other) +{ + datasketches::cpc_union merge_set; + merge_set.update(_set); + merge_set.update(other._set); + _set = 
merge_set.get_result(); +} +void Cardinality::to_json(json &j) const +{ + name_json_assign(j, lround(_set.get_estimate())); +} +void Cardinality::to_prometheus(std::stringstream &out) const +{ + out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; + out << "# TYPE " << base_name_snake() << " gauge" << std::endl; + out << name_snake() << ' ' << lround(_set.get_estimate()) << std::endl; +} + +// static storage for base labels +Metric::LabelMap Metric::_base_labels; + +void Metric::name_json_assign(json &j, const json &val) const +{ + json *j_part = &j; + for (const auto &s_part : _name) { + j_part = &(*j_part)[s_part]; + } + (*j_part) = val; +} +void Metric::name_json_assign(json &j, std::initializer_list add_names, const json &val) const +{ + json *j_part = &j; + for (const auto &s_part : _name) { + j_part = &(*j_part)[s_part]; + } + for (const auto &s_part : add_names) { + j_part = &(*j_part)[s_part]; + } + (*j_part) = val; +} +std::string Metric::base_name_snake() const +{ + auto snake = [](const std::string &ss, const std::string &s) { + return ss.empty() ? s : ss + "_" + s; + }; + std::string name_text = _schema_key + "_" + std::accumulate(std::begin(_name), std::end(_name), std::string(), snake); + return name_text; +} + +std::string Metric::name_snake(std::initializer_list add_names, Metric::LabelMap add_labels) const +{ + std::string label_text{"{"}; + if (!_base_labels.empty()) { + for (const auto &[key, value] : _base_labels) { + label_text.append(key + "=\"" + value + "\","); + } + } + if (add_labels.size()) { + for (const auto &[key, value] : add_labels) { + label_text.append(key + "=\"" + value + "\","); + } + } + if (label_text.back() == ',') { + label_text.pop_back(); + } + label_text.push_back('}'); + auto snake = [](const std::string &ss, const std::string &s) { + return ss.empty() ? 
s : ss + "_" + s; + }; + std::string name_text = _schema_key + "_" + std::accumulate(std::begin(_name), std::end(_name), std::string(), snake); + if (add_names.size()) { + name_text.push_back('_'); + name_text.append(std::accumulate(std::begin(add_names), std::end(add_names), std::string(), snake)); + } + return name_text + label_text; +} + +} \ No newline at end of file diff --git a/src/Metrics.h b/src/Metrics.h new file mode 100644 index 000000000..fd5bfa081 --- /dev/null +++ b/src/Metrics.h @@ -0,0 +1,408 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +#pragma once +#include +#include +#include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wold-style-cast" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma clang diagnostic ignored "-Wrange-loop-analysis" +#include +#include +#include +#pragma GCC diagnostic pop +#include +#include +#include + +namespace visor { + +using json = nlohmann::json; +using namespace std::chrono; + +class Metric +{ +public: + typedef std::map LabelMap; + +private: + /** + * labels which will be applied to all metrics + */ + static LabelMap _base_labels; + +protected: + std::vector _name; + std::string _desc; + std::string _schema_key; + +public: + Metric(std::string schema_key, std::initializer_list names, std::string desc) + : _name(names) + , _desc(std::move(desc)) + , _schema_key(schema_key) + { + } + + void set_info(std::string schema_key, std::initializer_list names, const std::string &desc) + { + _name.clear(); + _name = names; + _desc = desc; + _schema_key = schema_key; + } + + static void add_base_label(const std::string &label, const std::string &value) + { + _base_labels.emplace(label, value); + } + + void name_json_assign(json &j, const json &val) const; + void name_json_assign(json 
&j, std::initializer_list add_names, const json &val) const; + + [[nodiscard]] std::string base_name_snake() const; + [[nodiscard]] std::string name_snake(std::initializer_list add_names = {}, LabelMap add_labels = {}) const; + + virtual void to_json(json &j) const = 0; + virtual void to_prometheus(std::stringstream &out) const = 0; +}; + +/** + * A Counter metric class which knows how to render its output + * NOTE: intentionally _not_ thread safe; it should be protected by a mutex + */ +class Counter final : public Metric +{ + uint64_t _value = 0; + +public: + Counter(std::string schema_key, std::initializer_list names, std::string desc) + : Metric(schema_key, names, std::move(desc)) + { + } + + Counter &operator++() + { + ++_value; + return *this; + } + + [[nodiscard]] uint64_t value() const + { + return _value; + } + + void operator+=(uint64_t i) + { + _value += i; + } + + void operator+=(const Counter &other) + { + _value += other._value; + } + + // Metric + void to_json(json &j) const override; + void to_prometheus(std::stringstream &out) const override; +}; + +/** + * A Quantile metric class which knows how to render its output into p50, p90, p95, p99 + * + * NOTE: intentionally _not_ thread safe; it should be protected by a mutex + */ +template +class Quantile final : public Metric +{ + datasketches::kll_sketch _quantile; + +public: + Quantile(std::string schema_key, std::initializer_list names, std::string desc) + : Metric(schema_key, names, std::move(desc)) + { + } + + void update(const T &value) + { + _quantile.update(value); + } + + void update(T &&value) + { + _quantile.update(value); + } + + void merge(const Quantile &other) + { + _quantile.merge(other._quantile); + } + + auto get_n() const + { + return _quantile.get_n(); + } + + auto get_quantile(float p) const + { + return _quantile.get_quantile(p); + } + + // Metric + void to_json(json &j) const override + { + const double fractions[4]{0.50, 0.90, 0.95, 0.99}; + + auto quantiles = 
_quantile.get_quantiles(fractions, 4); + if (quantiles.size()) { + name_json_assign(j, {"p50"}, quantiles[0]); + name_json_assign(j, {"p90"}, quantiles[1]); + name_json_assign(j, {"p95"}, quantiles[2]); + name_json_assign(j, {"p99"}, quantiles[3]); + } + } + + void to_prometheus(std::stringstream &out) const override + { + const double fractions[4]{0.50, 0.90, 0.95, 0.99}; + + auto quantiles = _quantile.get_quantiles(fractions, 4); + + if (quantiles.size()) { + out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; + out << "# TYPE " << base_name_snake() << " summary" << std::endl; + out << name_snake({}, {{"quantile", "0.5"}}) << ' ' << quantiles[0] << std::endl; + out << name_snake({}, {{"quantile", "0.9"}}) << ' ' << quantiles[1] << std::endl; + out << name_snake({}, {{"quantile", "0.95"}}) << ' ' << quantiles[2] << std::endl; + out << name_snake({}, {{"quantile", "0.99"}}) << ' ' << quantiles[3] << std::endl; + out << name_snake({"sum"}) << ' ' << _quantile.get_max_value() << std::endl; + out << name_snake({"count"}) << ' ' << _quantile.get_n() << std::endl; + } + } +}; + +/** + * A Frequent Item metric class which knows how to render its output into a table of top N + * + * NOTE: intentionally _not_ thread safe; it should be protected by a mutex + */ +template +class TopN final : public Metric +{ +public: + // + // https://datasketches.github.io/docs/Frequency/FrequentItemsErrorTable.html + // + // we need to size for stream length of (essentially) pps within MetricsMgr::PERIOD_SEC + // at close to ~1 mil PPS (5.6E+07 per 60s) we can hit being off by ~24000 at max map size of 8192 + // this number also affects memory usage, by limiting the number of objects tracked + // e.g. up to MAX_FI_MAP_SIZE strings (ints, etc) may be stored per sketch + // note that the actual storage space for the strings is on the heap and not counted here, though. 
+ const uint8_t START_FI_MAP_SIZE = 7; // 2^7 = 128 + const uint8_t MAX_FI_MAP_SIZE = 13; // 2^13 = 8192 + +private: + datasketches::frequent_items_sketch _fi; + size_t _top_count = 10; + +public: + TopN(std::string schema_key, std::initializer_list names, std::string desc) + : Metric(schema_key, names, std::move(desc)) + , _fi(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) + { + } + + void update(const T &value) + { + _fi.update(value); + } + + void update(T &&value) + { + _fi.update(value); + } + + void merge(const TopN &other) + { + _fi.merge(other._fi); + } + + /** + * to_json which takes a formatter to format the "name" + * @param j json object + * @param formatter std::function which takes a T as input (the type stored in the top table); it needs to return a std::string + */ + void to_json(json &j, std::function formatter) const + { + auto section = json::array(); + auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); + for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { + section[i]["name"] = formatter(items[i].get_item()); + section[i]["estimate"] = items[i].get_estimate(); + } + name_json_assign(j, section); + } + + void to_prometheus(std::stringstream &out, std::function formatter) const + { + auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); + for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { + out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; + out << "# TYPE " << base_name_snake() << " gauge" << std::endl; + out << name_snake({}, {{"name", formatter(items[i].get_item())}}) << ' ' << items[i].get_estimate() << std::endl; + } + } + + // Metric + void to_json(json &j) const override + { + auto section = json::array(); + auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); + for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { + section[i]["name"] = items[i].get_item(); 
+ section[i]["estimate"] = items[i].get_estimate(); + } + name_json_assign(j, section); + } + + void to_prometheus(std::stringstream &out) const override + { + auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); + for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { + out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; + out << "# TYPE " << base_name_snake() << " gauge" << std::endl; + std::stringstream name_text; + name_text << items[i].get_item(); + out << name_snake({}, {{"name", name_text.str()}}) << ' ' << items[i].get_estimate() << std::endl; + } + } +}; + +/** + * A Cardinality metric class which knows how to render its output + * + * NOTE: intentionally _not_ thread safe; it should be protected by a mutex + */ +class Cardinality final : public Metric +{ + datasketches::cpc_sketch _set; + +public: + Cardinality(std::string schema_key, std::initializer_list names, std::string desc) + : Metric(schema_key, names, std::move(desc)) + { + } + + template + void update(const T &value) + { + _set.update(value); + } + + template + void update(T &&value) + { + _set.update(value); + } + + void update(const void *value, int size) + { + _set.update(value, size); + } + + void merge(const Cardinality &other); + + // Metric + void to_json(json &j) const override; + void to_prometheus(std::stringstream &out) const override; +}; + +/** + * A Rate metric class which knows how to render its output. 
Note that this is only useful for "live" rates, + * that is, calculating rates in real time and not from pre recorded streams + * + * NOTE: this class _is_ thread safe, it _does not_ need an additional mutex + */ +class Rate final : public Metric +{ + std::atomic_uint64_t _counter; + std::atomic_uint64_t _rate; + mutable std::shared_mutex _sketch_mutex; + datasketches::kll_sketch _quantile; + + std::shared_ptr _timer_handle; + + void _start_timer() + { + // all rates use a single static timer object which holds its own thread + // the tick argument determines the granularity of job running and canceling + static timer timer_thread{100ms}; + _timer_handle = timer_thread.set_interval(1s, [this] { + _rate.store(_counter.exchange(0)); + // lock mutex for write + std::unique_lock lock(_sketch_mutex); + _quantile.update(_rate); + }); + } + +public: + Rate(std::string schema_key, std::initializer_list names, std::string desc) + : Metric(schema_key, names, std::move(desc)) + , _counter(0) + , _rate(0) + , _quantile() + { + _start_timer(); + } + + ~Rate() + { + _timer_handle->cancel(); + } + + /** + * stop rate collection, ie. expect no more counter updates. 
+ * does not affect the quantiles - in effect, it makes the rate read only + * must be thread safe + */ + void cancel() + { + _timer_handle->cancel(); + _rate.store(0, std::memory_order_relaxed); + _counter.store(0, std::memory_order_relaxed); + } + + Rate &operator++() + { + _counter.fetch_add(1, std::memory_order_relaxed); + return *this; + } + + uint64_t rate() const + { + return _rate.load(std::memory_order_relaxed); + } + + void merge(const Rate &other) + { + std::shared_lock r_lock(other._sketch_mutex); + std::unique_lock w_lock(_sketch_mutex); + _quantile.merge(other._quantile); + // the live rate is simply copied if non zero + if (other._rate != 0) { + _rate.store(other._rate, std::memory_order_relaxed); + } + } + + void to_json(json &j, bool include_live) const; + + // Metric + void to_json(json &j) const override; + void to_prometheus(std::stringstream &out) const override; +}; + +} \ No newline at end of file diff --git a/src/StreamHandler.h b/src/StreamHandler.h index 2360a3cc7..159a00c05 100644 --- a/src/StreamHandler.h +++ b/src/StreamHandler.h @@ -9,7 +9,7 @@ #include #include -namespace vizer { +namespace visor { using json = nlohmann::json; @@ -25,6 +25,7 @@ class StreamHandler : public AbstractModule virtual ~StreamHandler(){}; virtual void window_json(json &j, uint64_t period, bool merged) = 0; + virtual void window_prometheus(std::stringstream &out) = 0; }; template @@ -42,7 +43,6 @@ class StreamMetricsHandler : public StreamHandler j["metrics"]["periods_configured"] = _metrics->num_periods(); j["metrics"]["periods"] = json::array(); - const double fractions[4]{0.50, 0.90, 0.95, 0.99}; for (auto i = 0UL; i < _metrics->current_periods(); ++i) { { std::stringstream ssts; @@ -58,20 +58,10 @@ class StreamMetricsHandler : public StreamHandler } j["metrics"]["periods"][i]["read_only"] = _metrics->bucket(i)->read_only(); j["metrics"]["periods"][i]["length"] = _metrics->bucket(i)->period_length(); - auto [num_events, num_samples, event_rate] = 
_metrics->bucket(i)->event_data(); - j["metrics"]["periods"][i]["events"]["total"] = num_events; - j["metrics"]["periods"][i]["events"]["deep_samples"] = num_samples; - if (!_metrics->bucket(i)->read_only()) { - j["metrics"]["periods"][i]["events"]["rates"]["live"] = event_rate->rate(); - } - auto [rate_quantile, rate_lock] = event_rate->quantile_get_rlocked(); - auto quantiles = rate_quantile->get_quantiles(fractions, 4); - if (quantiles.size()) { - j["metrics"]["periods"][i]["events"]["rates"]["p50"] = quantiles[0]; - j["metrics"]["periods"][i]["events"]["rates"]["p90"] = quantiles[1]; - j["metrics"]["periods"][i]["events"]["rates"]["p95"] = quantiles[2]; - j["metrics"]["periods"][i]["events"]["rates"]["p99"] = quantiles[3]; - } + auto [num_events, num_samples, event_rate, event_lock] = _metrics->bucket(i)->event_data_locked(); + num_events->to_json(j["metrics"]["periods"][i]["events"]); + num_samples->to_json(j["metrics"]["periods"][i]["events"]); + event_rate->to_json(j["metrics"]["periods"][i]["events"]["rates"], !_metrics->bucket(i)->read_only()); } } diff --git a/src/handlers/CMakeLists.txt b/src/handlers/CMakeLists.txt index 47674c6d7..97ffdf919 100644 --- a/src/handlers/CMakeLists.txt +++ b/src/handlers/CMakeLists.txt @@ -2,4 +2,4 @@ add_subdirectory(net) add_subdirectory(dns) -set(VIZER_STATIC_PLUGINS ${VIZER_STATIC_PLUGINS} PARENT_SCOPE) +set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} PARENT_SCOPE) diff --git a/src/handlers/README.md b/src/handlers/README.md new file mode 100644 index 000000000..66202b9ae --- /dev/null +++ b/src/handlers/README.md @@ -0,0 +1,9 @@ +# Stream Handler Modules + +This directory contains the built-in stream handler modules. These attach to [stream input events](/src/inputs) and +summarize them, typically using a time window of efficient sketch algorithms and counters which are exposed as metrics. 
+ +See the individual READMEs for more information: + +* [Network](net/) +* [DNS](dns/) diff --git a/src/handlers/dns/CMakeLists.txt b/src/handlers/dns/CMakeLists.txt index 878e0bbeb..867d0b4d6 100644 --- a/src/handlers/dns/CMakeLists.txt +++ b/src/handlers/dns/CMakeLists.txt @@ -2,7 +2,7 @@ message(STATUS "Handler Module: DNS") set_directory_properties(PROPERTIES CORRADE_USE_PEDANTIC_FLAGS ON) -corrade_add_static_plugin(VizerHandlerDns +corrade_add_static_plugin(VisorHandlerDns ${CMAKE_CURRENT_BINARY_DIR} DnsHandler.conf DnsHandlerModulePlugin.cpp @@ -14,30 +14,18 @@ corrade_add_static_plugin(VizerHandlerDns DnsResource.cpp DnsResourceData.cpp ) -add_library(Vizer::Handler::Dns ALIAS VizerHandlerDns) +add_library(Visor::Handler::Dns ALIAS VisorHandlerDns) -target_include_directories(VizerHandlerDns +target_include_directories(VisorHandlerDns INTERFACE $ ) -target_link_libraries(VizerHandlerDns +target_link_libraries(VisorHandlerDns PUBLIC - Vizer::Input::Pcap + Visor::Input::Pcap ) -set(VIZER_STATIC_PLUGINS ${VIZER_STATIC_PLUGINS} Vizer::Handler::Dns PARENT_SCOPE) +set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} Visor::Handler::Dns PARENT_SCOPE) -## TEST SUITE -add_executable(unit-tests-handler-dns - tests/main.cpp - tests/test_dns.cpp - tests/test_dns_layer.cpp - ) - -target_link_libraries(unit-tests-handler-dns PRIVATE Vizer::Handler::Dns) - -add_test(NAME unit-tests-handler-dns - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/tests - COMMAND unit-tests-handler-dns - ) \ No newline at end of file +add_subdirectory(tests) \ No newline at end of file diff --git a/src/handlers/dns/DnsHandlerModulePlugin.cpp b/src/handlers/dns/DnsHandlerModulePlugin.cpp index f5ab85426..126ca040b 100644 --- a/src/handlers/dns/DnsHandlerModulePlugin.cpp +++ b/src/handlers/dns/DnsHandlerModulePlugin.cpp @@ -8,12 +8,12 @@ #include #include -CORRADE_PLUGIN_REGISTER(VizerHandlerDns, vizer::handler::dns::DnsHandlerModulePlugin, - "dev.vizer.module.handler/1.0") 
+CORRADE_PLUGIN_REGISTER(VisorHandlerDns, visor::handler::dns::DnsHandlerModulePlugin, + "dev.visor.module.handler/1.0") -namespace vizer::handler::dns { +namespace visor::handler::dns { -using namespace vizer::input::pcap; +using namespace visor::input::pcap; using json = nlohmann::json; void DnsHandlerModulePlugin::_setup_routes(HttpServer &svr) diff --git a/src/handlers/dns/DnsHandlerModulePlugin.h b/src/handlers/dns/DnsHandlerModulePlugin.h index 21aa68ac2..0f84102ee 100644 --- a/src/handlers/dns/DnsHandlerModulePlugin.h +++ b/src/handlers/dns/DnsHandlerModulePlugin.h @@ -6,7 +6,7 @@ #include "HandlerModulePlugin.h" -namespace vizer::handler::dns { +namespace visor::handler::dns { class DnsHandlerModulePlugin : public HandlerModulePlugin { @@ -16,7 +16,7 @@ class DnsHandlerModulePlugin : public HandlerModulePlugin public: explicit DnsHandlerModulePlugin(Corrade::PluginManager::AbstractManager &manager, const std::string &plugin) - : vizer::HandlerModulePlugin{manager, plugin} + : visor::HandlerModulePlugin{manager, plugin} { } diff --git a/src/handlers/dns/DnsLayer.cpp b/src/handlers/dns/DnsLayer.cpp index 9e319b8c7..ae475ff8b 100644 --- a/src/handlers/dns/DnsLayer.cpp +++ b/src/handlers/dns/DnsLayer.cpp @@ -12,7 +12,7 @@ #include #include -namespace vizer::handler::dns { +namespace visor::handler::dns { DnsLayer::DnsLayer(uint8_t *data, size_t dataLen, Layer *prevLayer, pcpp::Packet *packet) : Layer(data, dataLen, prevLayer, packet) @@ -23,7 +23,7 @@ DnsLayer::DnsLayer(uint8_t *data, size_t dataLen, Layer *prevLayer, pcpp::Packet m_FirstQuery = NULL; m_FirstAnswer = NULL; m_FirstAuthority = NULL; - m_FirstAdditional = NULL; + m_FirstAdditional = NULL; } @@ -838,4 +838,4 @@ bool DnsLayer::removeResource(IDnsResource* resourceToRemove) return true; } -} // namespace vizer +} // namespace visor diff --git a/src/handlers/dns/DnsLayer.h b/src/handlers/dns/DnsLayer.h index 868a49fb2..73d774de2 100644 --- a/src/handlers/dns/DnsLayer.h +++ 
b/src/handlers/dns/DnsLayer.h @@ -17,9 +17,9 @@ /// @file /** - * \namespace vizer + * \namespace visor */ -namespace vizer::handler::dns { +namespace visor::handler::dns { /** * @struct dnshdr @@ -29,7 +29,7 @@ namespace vizer::handler::dns { #pragma pack(push, 1) struct dnshdr { /** DNS query identification */ - uint16_t transactionID; + uint16_t transactionID; #if (BYTE_ORDER == LITTLE_ENDIAN) uint16_t /** Recursion desired flag */ @@ -485,6 +485,6 @@ struct dnshdr { } } - } // namespace vizer + } // namespace visor #endif /* PV_PACKETPP_DNS_LAYER */ diff --git a/src/handlers/dns/DnsLayerEnums.h b/src/handlers/dns/DnsLayerEnums.h index 91ec9fcd0..620761e7e 100644 --- a/src/handlers/dns/DnsLayerEnums.h +++ b/src/handlers/dns/DnsLayerEnums.h @@ -4,9 +4,9 @@ /// @file /** - * \namespace vizer + * \namespace visor */ -namespace vizer::handler::dns { +namespace visor::handler::dns { /** * An enum for all possible DNS record types */ @@ -16,8 +16,8 @@ enum DnsType { /** Name Server record */ DNS_TYPE_NS, /** Obsolete, replaced by MX */ - DNS_TYPE_MD, - /** Obsolete, replaced by MX */ + DNS_TYPE_MD, + /** Obsolete, replaced by MX */ DNS_TYPE_MF, /** Canonical name record */ DNS_TYPE_CNAME, diff --git a/src/handlers/dns/DnsResource.cpp b/src/handlers/dns/DnsResource.cpp index 466cd6434..59d5296b9 100644 --- a/src/handlers/dns/DnsResource.cpp +++ b/src/handlers/dns/DnsResource.cpp @@ -9,7 +9,7 @@ #include #include -namespace vizer::handler::dns { +namespace visor::handler::dns { IDnsResource::IDnsResource(DnsLayer *dnsLayer, size_t offsetInLayer) : m_DnsLayer(dnsLayer) diff --git a/src/handlers/dns/DnsResource.h b/src/handlers/dns/DnsResource.h index 8c79ad4d7..a8f1d2df8 100644 --- a/src/handlers/dns/DnsResource.h +++ b/src/handlers/dns/DnsResource.h @@ -11,9 +11,9 @@ /// @file /** - * \namespace vizer + * \namespace visor */ -namespace vizer::handler::dns { +namespace visor::handler::dns { // forward declarations class DnsLayer; class IDnsResourceData; @@ -24,7 +24,7 @@ 
class DnsResourceDataPtr; * An abstract class for representing all types of DNS records. This class gives access to all available record data such as DNS type, class, * name, type of record, etc. The DnsLayer holds an instance of (inherited type of) this class for each DNS record in the DNS packet */ - class IDnsResource +class IDnsResource { protected: friend class DnsLayer; diff --git a/src/handlers/dns/DnsResourceData.cpp b/src/handlers/dns/DnsResourceData.cpp index 78c5e974b..1d570bbcf 100644 --- a/src/handlers/dns/DnsResourceData.cpp +++ b/src/handlers/dns/DnsResourceData.cpp @@ -13,7 +13,7 @@ // forked code #pragma GCC diagnostic ignored "-Wunused-parameter" -namespace vizer::handler::dns { +namespace visor::handler::dns { size_t IDnsResourceData::decodeName(const char *encodedName, char *result, IDnsResource *dnsResource) const { diff --git a/src/handlers/dns/DnsResourceData.h b/src/handlers/dns/DnsResourceData.h index 919aa3261..227da928c 100644 --- a/src/handlers/dns/DnsResourceData.h +++ b/src/handlers/dns/DnsResourceData.h @@ -10,9 +10,9 @@ /// @file /** - * \namespace vizer + * \namespace visor */ -namespace vizer::handler::dns { +namespace visor::handler::dns { //Visual studio has always been stupid about returning something useful for __cplusplus //Only recently was this fixed - and even then it requires a specific hack to the command line during build @@ -23,7 +23,7 @@ namespace vizer::handler::dns { #define PCPP_SMART_PTR(T) std::unique_ptr #else #define PCPP_SMART_PTR(T) std::auto_ptr - #endif +#endif // forward declarations class IDnsResource; diff --git a/src/handlers/dns/DnsStreamHandler.cpp b/src/handlers/dns/DnsStreamHandler.cpp index 0d4d36605..1869ce6db 100644 --- a/src/handlers/dns/DnsStreamHandler.cpp +++ b/src/handlers/dns/DnsStreamHandler.cpp @@ -15,13 +15,12 @@ #include #pragma GCC diagnostic pop #include -#include #include -namespace vizer::handler::dns { +namespace visor::handler::dns { DnsStreamHandler::DnsStreamHandler(const 
std::string &name, PcapInputStream *stream, uint periods, int deepSampleRate) - : vizer::StreamMetricsHandler(name, periods, deepSampleRate) + : visor::StreamMetricsHandler(name, periods, deepSampleRate) , _stream(stream) { assert(stream); @@ -33,6 +32,10 @@ void DnsStreamHandler::start() return; } + if (config_exists("recorded_stream")) { + _metrics->set_recorded_stream(); + } + _pkt_udp_connection = _stream->udp_signal.connect(&DnsStreamHandler::process_udp_packet_cb, this); _start_tstamp_connection = _stream->start_tstamp_signal.connect(&DnsStreamHandler::set_start_tstamp, this); _end_tstamp_connection = _stream->end_tstamp_signal.connect(&DnsStreamHandler::set_end_tstamp, this); @@ -200,7 +203,14 @@ void DnsStreamHandler::tcp_connection_end_cb(const pcpp::ConnectionData &connect // remove the connection from the connection manager _tcp_connections.erase(iter); } - +void DnsStreamHandler::window_prometheus(std::stringstream &out) +{ + if (_metrics->current_periods() > 1) { + _metrics->window_single_prometheus(out, 1); + } else { + _metrics->window_single_prometheus(out, 0); + } +} void DnsStreamHandler::window_json(json &j, uint64_t period, bool merged) { if (merged) { @@ -249,10 +259,7 @@ void DnsMetricsBucket::specialized_merge(const AbstractMetricsBucket &o) _dnsXactFromTimeUs.merge(other._dnsXactFromTimeUs); _dnsXactToTimeUs.merge(other._dnsXactToTimeUs); - datasketches::cpc_union merge_qnameCard; - merge_qnameCard.update(_dns_qnameCard); - merge_qnameCard.update(other._dns_qnameCard); - _dns_qnameCard = merge_qnameCard.get_result(); + _dns_qnameCard.merge(other._dns_qnameCard); _dns_topQname2.merge(other._dns_topQname2); _dns_topQname3.merge(other._dns_topQname3); @@ -269,161 +276,59 @@ void DnsMetricsBucket::specialized_merge(const AbstractMetricsBucket &o) void DnsMetricsBucket::to_json(json &j) const { - const double fractions[4]{0.50, 0.90, 0.95, 0.99}; + bool live_rates = !read_only() && !recorded_stream(); + auto [num_events, num_samples, event_rate, 
event_lock] = event_data_locked(); // thread safe - auto [num_events, num_samples, event_rate] = event_data(); // thread safe - { - if (!read_only()) { - j["wire_packets"]["rates"]["total"]["live"] = event_rate->rate(); - } - auto [rate_quantile, rate_lock] = event_rate->quantile_get_rlocked(); - auto quantiles = rate_quantile->get_quantiles(fractions, 4); - if (quantiles.size()) { - j["wire_packets"]["rates"]["total"]["p50"] = quantiles[0]; - j["wire_packets"]["rates"]["total"]["p90"] = quantiles[1]; - j["wire_packets"]["rates"]["total"]["p95"] = quantiles[2]; - j["wire_packets"]["rates"]["total"]["p99"] = quantiles[3]; - } - } + event_rate->to_json(j, live_rates); + num_events->to_json(j); + num_samples->to_json(j); std::shared_lock r_lock(_mutex); - j["wire_packets"]["total"] = num_events; - j["wire_packets"]["deep_samples"] = num_samples; - j["wire_packets"]["queries"] = _counters.queries; - j["wire_packets"]["replies"] = _counters.replies; - j["wire_packets"]["tcp"] = _counters.TCP; - j["wire_packets"]["udp"] = _counters.UDP; - j["wire_packets"]["ipv4"] = _counters.IPv4; - j["wire_packets"]["ipv6"] = _counters.IPv6; - j["wire_packets"]["nxdomain"] = _counters.NX; - j["wire_packets"]["refused"] = _counters.REFUSED; - j["wire_packets"]["srvfail"] = _counters.SRVFAIL; - j["wire_packets"]["noerror"] = _counters.NOERROR; - - j["cardinality"]["qname"] = lround(_dns_qnameCard.get_estimate()); - j["xact"]["counts"]["total"] = _counters.xacts_total; - j["xact"]["counts"]["timed_out"] = _counters.xacts_timed_out; - - { - j["xact"]["in"]["total"] = _counters.xacts_in; - j["xact"]["in"]["top_slow"] = nlohmann::json::array(); - auto items = _dns_slowXactIn.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["xact"]["in"]["top_slow"][i]["name"] = items[i].get_item(); - j["xact"]["in"]["top_slow"][i]["estimate"] = items[i].get_estimate(); - } - } - - auto d_quantiles = 
_dnsXactFromTimeUs.get_quantiles(fractions, 4); - if (d_quantiles.size()) { - j["xact"]["out"]["quantiles_us"]["p50"] = d_quantiles[0]; - j["xact"]["out"]["quantiles_us"]["p90"] = d_quantiles[1]; - j["xact"]["out"]["quantiles_us"]["p95"] = d_quantiles[2]; - j["xact"]["out"]["quantiles_us"]["p99"] = d_quantiles[3]; - } - - d_quantiles = _dnsXactToTimeUs.get_quantiles(fractions, 4); - if (d_quantiles.size()) { - j["xact"]["in"]["quantiles_us"]["p50"] = d_quantiles[0]; - j["xact"]["in"]["quantiles_us"]["p90"] = d_quantiles[1]; - j["xact"]["in"]["quantiles_us"]["p95"] = d_quantiles[2]; - j["xact"]["in"]["quantiles_us"]["p99"] = d_quantiles[3]; - } - - { - j["xact"]["out"]["total"] = _counters.xacts_out; - j["xact"]["out"]["top_slow"] = nlohmann::json::array(); - auto items = _dns_slowXactOut.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["xact"]["out"]["top_slow"][i]["name"] = items[i].get_item(); - j["xact"]["out"]["top_slow"][i]["estimate"] = items[i].get_estimate(); - } - } - - { - j["top_udp_ports"] = nlohmann::json::array(); - auto items = _dns_topUDPPort.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["top_udp_ports"][i]["name"] = std::to_string(items[i].get_item()); - j["top_udp_ports"][i]["estimate"] = items[i].get_estimate(); - } - } - - { - j["top_qname2"] = nlohmann::json::array(); - auto items = _dns_topQname2.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["top_qname2"][i]["name"] = items[i].get_item(); - j["top_qname2"][i]["estimate"] = items[i].get_estimate(); - } - } - - { - j["top_qname3"] = nlohmann::json::array(); - auto items = _dns_topQname3.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < 
std::min(10UL, items.size()); i++) { - j["top_qname3"][i]["name"] = items[i].get_item(); - j["top_qname3"][i]["estimate"] = items[i].get_estimate(); - } - } - - { - j["top_nxdomain"] = nlohmann::json::array(); - auto items = _dns_topNX.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["top_nxdomain"][i]["name"] = items[i].get_item(); - j["top_nxdomain"][i]["estimate"] = items[i].get_estimate(); - } - } - - { - j["top_refused"] = nlohmann::json::array(); - auto items = _dns_topREFUSED.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["top_refused"][i]["name"] = items[i].get_item(); - j["top_refused"][i]["estimate"] = items[i].get_estimate(); - } - } - - { - j["top_srvfail"] = nlohmann::json::array(); - auto items = _dns_topSRVFAIL.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["top_srvfail"][i]["name"] = items[i].get_item(); - j["top_srvfail"][i]["estimate"] = items[i].get_estimate(); - } - } - - { - j["top_rcode"] = nlohmann::json::array(); - auto items = _dns_topRCode.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - if (RCodeNames.find(items[i].get_item()) != RCodeNames.end()) { - j["top_rcode"][i]["name"] = RCodeNames[items[i].get_item()]; - } else { - std::stringstream keyBuf; - keyBuf << items[i].get_item(); - j["top_rcode"][i]["name"] = keyBuf.str(); - } - j["top_rcode"][i]["estimate"] = items[i].get_estimate(); + _counters.queries.to_json(j); + _counters.replies.to_json(j); + _counters.TCP.to_json(j); + _counters.UDP.to_json(j); + _counters.IPv4.to_json(j); + _counters.IPv6.to_json(j); + _counters.NX.to_json(j); + _counters.REFUSED.to_json(j); + 
_counters.SRVFAIL.to_json(j); + _counters.NOERROR.to_json(j); + + _dns_qnameCard.to_json(j); + _counters.xacts_total.to_json(j); + _counters.xacts_timed_out.to_json(j); + + _counters.xacts_in.to_json(j); + _dns_slowXactIn.to_json(j); + + _dnsXactFromTimeUs.to_json(j); + _dnsXactToTimeUs.to_json(j); + + _counters.xacts_out.to_json(j); + _dns_slowXactOut.to_json(j); + + _dns_topUDPPort.to_json(j, [](const uint16_t &val) { return std::to_string(val); }); + _dns_topQname2.to_json(j); + _dns_topQname3.to_json(j); + _dns_topNX.to_json(j); + _dns_topREFUSED.to_json(j); + _dns_topSRVFAIL.to_json(j); + _dns_topRCode.to_json(j, [](const uint16_t &val) { + if (RCodeNames.find(val) != RCodeNames.end()) { + return RCodeNames[val]; + } else { + return std::to_string(val); } - } - - { - j["top_qtype"] = nlohmann::json::array(); - auto items = _dns_topQType.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - if (QTypeNames.find(items[i].get_item()) != QTypeNames.end()) { - j["top_qtype"][i]["name"] = QTypeNames[items[i].get_item()]; - } else { - std::stringstream keyBuf; - keyBuf << items[i].get_item(); - j["top_qtype"][i]["name"] = keyBuf.str(); - } - j["top_qtype"][i]["estimate"] = items[i].get_estimate(); + }); + _dns_topQType.to_json(j, [](const uint16_t &val) { + if (QTypeNames.find(val) != QTypeNames.end()) { + return QTypeNames[val]; + } else { + return std::to_string(val); } - } + }); } // the main bucket analysis @@ -543,6 +448,61 @@ void DnsMetricsBucket::new_dns_transaction(bool deep, float to90th, float from90 } } } +void DnsMetricsBucket::to_prometheus(std::stringstream &out) const +{ + auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe + + event_rate->to_prometheus(out); + num_events->to_prometheus(out); + num_samples->to_prometheus(out); + + std::shared_lock r_lock(_mutex); + + _counters.queries.to_prometheus(out); + 
_counters.replies.to_prometheus(out); + _counters.TCP.to_prometheus(out); + _counters.UDP.to_prometheus(out); + _counters.IPv4.to_prometheus(out); + _counters.IPv6.to_prometheus(out); + _counters.NX.to_prometheus(out); + _counters.REFUSED.to_prometheus(out); + _counters.SRVFAIL.to_prometheus(out); + _counters.NOERROR.to_prometheus(out); + + _dns_qnameCard.to_prometheus(out); + _counters.xacts_total.to_prometheus(out); + _counters.xacts_timed_out.to_prometheus(out); + + _counters.xacts_in.to_prometheus(out); + _dns_slowXactIn.to_prometheus(out); + + _dnsXactFromTimeUs.to_prometheus(out); + _dnsXactToTimeUs.to_prometheus(out); + + _counters.xacts_out.to_prometheus(out); + _dns_slowXactOut.to_prometheus(out); + + _dns_topUDPPort.to_prometheus(out, [](const uint16_t &val) { return std::to_string(val); }); + _dns_topQname2.to_prometheus(out); + _dns_topQname3.to_prometheus(out); + _dns_topNX.to_prometheus(out); + _dns_topREFUSED.to_prometheus(out); + _dns_topSRVFAIL.to_prometheus(out); + _dns_topRCode.to_prometheus(out, [](const uint16_t &val) { + if (RCodeNames.find(val) != RCodeNames.end()) { + return RCodeNames[val]; + } else { + return std::to_string(val); + } + }); + _dns_topQType.to_prometheus(out, [](const uint16_t &val) { + if (QTypeNames.find(val) != QTypeNames.end()) { + return QTypeNames[val]; + } else { + return std::to_string(val); + } + }); +} // the general metrics manager entry point (both UDP and TCP) void DnsMetricsManager::process_dns_layer(DnsLayer &payload, PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, uint32_t flowkey, uint16_t port, timespec stamp) diff --git a/src/handlers/dns/DnsStreamHandler.h b/src/handlers/dns/DnsStreamHandler.h index 501e0ebcb..f57fc11de 100644 --- a/src/handlers/dns/DnsStreamHandler.h +++ b/src/handlers/dns/DnsStreamHandler.h @@ -9,89 +9,96 @@ #include "StreamHandler.h" #include "dns.h" #include "querypairmgr.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wold-style-cast" -#pragma GCC 
diagnostic ignored "-Wunused-function" -#pragma clang diagnostic ignored "-Wrange-loop-analysis" -#include -#include -#include -#pragma GCC diagnostic pop #include #include -namespace vizer::handler::dns { +namespace visor::handler::dns { -using namespace vizer::input::pcap; +using namespace visor::input::pcap; -class DnsMetricsBucket final : public vizer::AbstractMetricsBucket +class DnsMetricsBucket final : public visor::AbstractMetricsBucket { -public: - const uint8_t START_FI_MAP_SIZE = 7; // 2^7 = 128 - const uint8_t MAX_FI_MAP_SIZE = 13; // 2^13 = 8192 - protected: mutable std::shared_mutex _mutex; - datasketches::kll_sketch _dnsXactFromTimeUs; - datasketches::kll_sketch _dnsXactToTimeUs; + Quantile _dnsXactFromTimeUs; + Quantile _dnsXactToTimeUs; - datasketches::cpc_sketch _dns_qnameCard; + Cardinality _dns_qnameCard; - datasketches::frequent_items_sketch _dns_topQname2; - datasketches::frequent_items_sketch _dns_topQname3; - datasketches::frequent_items_sketch _dns_topNX; - datasketches::frequent_items_sketch _dns_topREFUSED; - datasketches::frequent_items_sketch _dns_topSRVFAIL; - datasketches::frequent_items_sketch _dns_topUDPPort; - datasketches::frequent_items_sketch _dns_topQType; - datasketches::frequent_items_sketch _dns_topRCode; - datasketches::frequent_items_sketch _dns_slowXactIn; - datasketches::frequent_items_sketch _dns_slowXactOut; + TopN _dns_topQname2; + TopN _dns_topQname3; + TopN _dns_topNX; + TopN _dns_topREFUSED; + TopN _dns_topSRVFAIL; + TopN _dns_topUDPPort; + TopN _dns_topQType; + TopN _dns_topRCode; + TopN _dns_slowXactIn; + TopN _dns_slowXactOut; struct counters { - uint64_t xacts_total = 0; - uint64_t xacts_in = 0; - uint64_t xacts_out = 0; - uint64_t xacts_timed_out = 0; - uint64_t queries = 0; - uint64_t replies = 0; - uint64_t UDP = 0; - uint64_t TCP = 0; - uint64_t IPv4 = 0; - uint64_t IPv6 = 0; - uint64_t NX = 0; - uint64_t REFUSED = 0; - uint64_t SRVFAIL = 0; - uint64_t NOERROR = 0; + Counter xacts_total; + Counter xacts_in; 
+ Counter xacts_out; + Counter xacts_timed_out; + Counter queries; + Counter replies; + Counter UDP; + Counter TCP; + Counter IPv4; + Counter IPv6; + Counter NX; + Counter REFUSED; + Counter SRVFAIL; + Counter NOERROR; + counters() + : xacts_total("dns", {"xact", "counts", "total"}, "Total DNS transactions (query/reply pairs)") + , xacts_in("dns", {"xact", "in", "total"}, "Total ingress DNS transactions (host is server)") + , xacts_out("dns", {"xact", "out", "total"}, "Total egress DNS transactions (host is client)") + , xacts_timed_out("dns", {"xact", "counts", "timed_out"}, "Total number of DNS transactions that timed out") + , queries("dns", {"wire_packets", "queries"}, "Total DNS wire packets flagged as query (ingress and egress)") + , replies("dns", {"wire_packets", "replies"}, "Total DNS wire packets flagged as reply (ingress and egress)") + , UDP("dns", {"wire_packets", "udp"}, "Total DNS wire packets received over UDP (ingress and egress)") + , TCP("dns", {"wire_packets", "tcp"}, "Total DNS wire packets received over TCP (ingress and egress)") + , IPv4("dns", {"wire_packets", "ipv4"}, "Total DNS wire packets received over IPv4 (ingress and egress)") + , IPv6("dns", {"wire_packets", "ipv6"}, "Total DNS wire packets received over IPv6 (ingress and egress)") + , NX("dns", {"wire_packets", "nxdomain"}, "Total DNS wire packets flagged as reply with return code NXDOMAIN (ingress and egress)") + , REFUSED("dns", {"wire_packets", "refused"}, "Total DNS wire packets flagged as reply with return code REFUSED (ingress and egress)") + , SRVFAIL("dns", {"wire_packets", "srvfail"}, "Total DNS wire packets flagged as reply with return code SRVFAIL (ingress and egress)") + , NOERROR("dns", {"wire_packets", "noerror"}, "Total DNS wire packets flagged as reply with return code NOERROR (ingress and egress)") + { + } }; counters _counters; public: DnsMetricsBucket() - : _dnsXactFromTimeUs() - , _dnsXactToTimeUs() - , _dns_qnameCard() - , _dns_topQname2(MAX_FI_MAP_SIZE, 
START_FI_MAP_SIZE) - , _dns_topQname3(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _dns_topNX(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _dns_topREFUSED(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _dns_topSRVFAIL(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _dns_topUDPPort(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _dns_topQType(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _dns_topRCode(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _dns_slowXactIn(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _dns_slowXactOut(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _counters() + : _dnsXactFromTimeUs("dns", {"xact", "out", "quantiles_us"}, "Quantiles of transaction timing (query/reply pairs) when host is client, in microseconds") + , _dnsXactToTimeUs("dns", {"xact", "in", "quantiles_us"}, "Quantiles of transaction timing (query/reply pairs) when host is server, in microseconds") + , _dns_qnameCard("dns", {"cardinality", "qname"}, "Cardinality of unique QNAMES, both ingress and egress") + , _dns_topQname2("dns", {"top_qname2"}, "Top QNAMES, aggregated at a depth of two labels") + , _dns_topQname3("dns", {"top_qname3"}, "Top QNAMES, aggregated at a depth of three labels") + , _dns_topNX("dns", {"top_nxdomain"}, "Top QNAMES with result code NXDOMAIN") + , _dns_topREFUSED("dns", {"top_refused"}, "Top QNAMES with result code REFUSED") + , _dns_topSRVFAIL("dns", {"top_srvfail"}, "Top QNAMES with result code SRVFAIL") + , _dns_topUDPPort("dns", {"top_udp_ports"}, "Top UDP source port on the query side of a transaction") + , _dns_topQType("dns", {"top_qtype"}, "Top query types") + , _dns_topRCode("dns", {"top_rcode"}, "Top result codes") + , _dns_slowXactIn("dns", {"xact", "in", "top_slow"}, "Top QNAMES in transactions where host is the server and transaction speed is slower than p90") + , _dns_slowXactOut("dns", {"xact", "out", "top_slow"}, "Top QNAMES in transactions where host is the client and transaction speed is slower than p90") { + set_event_rate_info("dns", {"rates", "total"}, "Rate of all DNS wire packets (combined 
ingress and egress) per second"); + set_num_events_info("dns", {"wire_packets", "total"}, "Total DNS wire packets"); + set_num_sample_info("dns", {"wire_packets", "deep_samples"}, "Total DNS wire packets that were sampled for deep inspection"); } auto get_xact_data_locked() const { std::shared_lock lock(_mutex); struct retVals { - const datasketches::kll_sketch &xact_to; - const datasketches::kll_sketch &xact_from; + const Quantile &xact_to; + const Quantile &xact_from; std::shared_lock lock; }; return retVals{_dnsXactToTimeUs, _dnsXactFromTimeUs, std::move(lock)}; @@ -110,16 +117,17 @@ class DnsMetricsBucket final : public vizer::AbstractMetricsBucket return _counters; } - // vizer::AbstractMetricsBucket + // visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other) override; void to_json(json &j) const override; + void to_prometheus(std::stringstream &out) const override; void process_dns_layer(bool deep, DnsLayer &payload, pcpp::ProtocolType l3, pcpp::ProtocolType l4, uint16_t port); void new_dns_transaction(bool deep, float to90th, float from90th, DnsLayer &dns, PacketDirection dir, DnsTransaction xact); }; -class DnsMetricsManager final : public vizer::AbstractMetricsManager +class DnsMetricsManager final : public visor::AbstractMetricsManager { QueryResponsePairMgr _qr_pair_manager; @@ -128,7 +136,7 @@ class DnsMetricsManager final : public vizer::AbstractMetricsManager(periods, deepSampleRate) + : visor::AbstractMetricsManager(periods, deepSampleRate) { } @@ -190,7 +198,7 @@ struct TcpFlowData { } }; -class DnsStreamHandler final : public vizer::StreamMetricsHandler +class DnsStreamHandler final : public visor::StreamMetricsHandler { PcapInputStream *_stream; @@ -217,7 +225,7 @@ class DnsStreamHandler final : public vizer::StreamMetricsHandler #include -namespace vizer::handler::dns { +namespace visor::handler::dns { typedef std::pair AggDomainResult; AggDomainResult aggregateDomain(const std::string &domain); diff --git 
a/src/handlers/dns/querypairmgr.cpp b/src/handlers/dns/querypairmgr.cpp index 1e0cd25d0..309e4b0aa 100644 --- a/src/handlers/dns/querypairmgr.cpp +++ b/src/handlers/dns/querypairmgr.cpp @@ -17,7 +17,7 @@ static inline void timespec_diff(struct timespec *a, struct timespec *b, } } -namespace vizer::handler::dns { +namespace visor::handler::dns { void QueryResponsePairMgr::start_transaction(uint32_t flowKey, uint16_t queryID, timespec stamp) { diff --git a/src/handlers/dns/querypairmgr.h b/src/handlers/dns/querypairmgr.h index 19c14e65d..cfe531197 100644 --- a/src/handlers/dns/querypairmgr.h +++ b/src/handlers/dns/querypairmgr.h @@ -8,7 +8,7 @@ #include #include -namespace vizer::handler::dns { +namespace visor::handler::dns { using hr_clock = std::chrono::high_resolution_clock; diff --git a/src/handlers/dns/tests/CMakeLists.txt b/src/handlers/dns/tests/CMakeLists.txt new file mode 100644 index 000000000..316ef0156 --- /dev/null +++ b/src/handlers/dns/tests/CMakeLists.txt @@ -0,0 +1,27 @@ + +# Unit +add_executable(unit-tests-handler-dns + main.cpp + test_dns.cpp + test_dns_layer.cpp + test_json_schema.cpp + ) + +target_link_libraries(unit-tests-handler-dns + PRIVATE + ${CONAN_LIBS_JSON-SCHEMA-VALIDATOR} + Visor::Handler::Dns) + +add_test(NAME unit-tests-handler-dns + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src + COMMAND unit-tests-handler-dns + ) + +# Benchmark +add_executable(benchmark-handler-dns + benchmark_dns.cpp + ) + +target_link_libraries(benchmark-handler-dns PRIVATE + Visor::Handler::Dns + ${CONAN_LIBS_BENCHMARK}) diff --git a/src/handlers/dns/tests/benchmark_dns.cpp b/src/handlers/dns/tests/benchmark_dns.cpp new file mode 100644 index 000000000..c347a6dbd --- /dev/null +++ b/src/handlers/dns/tests/benchmark_dns.cpp @@ -0,0 +1,81 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ + +#include "../dns.h" +#include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wold-style-cast" +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma clang diagnostic ignored "-Wc99-extensions" +#pragma GCC diagnostic ignored "-Wpedantic" +#include +#include +#include +#pragma GCC diagnostic pop + +using namespace visor::handler::dns; + +static void BM_aggregateDomain(benchmark::State &state) +{ + AggDomainResult result; + std::string domain{"biz.foo.bar.com"}; + for (auto _ : state) { + result = aggregateDomain(domain); + } +} +BENCHMARK(BM_aggregateDomain); + +static void BM_aggregateDomainLong(benchmark::State &state) +{ + AggDomainResult result; + std::string domain{"long1.long2.long3.long4.long5.long6.long7.long8.biz.foo.bar.com"}; + for (auto _ : state) { + result = aggregateDomain(domain); + } +} + +BENCHMARK(BM_aggregateDomainLong); + +static void BM_pcapReadNoParse(benchmark::State &state) +{ + + for (auto _ : state) { + auto reader = pcpp::IFileReaderDevice::getReader("fixtures/dns_udp_tcp_random.pcap"); + + if (!reader->open()) { + throw std::runtime_error("Cannot open pcap/pcapng file"); + } + + pcpp::RawPacket rawPacket; + while (reader->getNextPacket(rawPacket)) { + } + + reader->close(); + delete reader; + } +} +BENCHMARK(BM_pcapReadNoParse); + +static void BM_pcapReadParse1(benchmark::State &state) +{ + + for (auto _ : state) { + auto reader = pcpp::IFileReaderDevice::getReader("fixtures/dns_udp_tcp_random.pcap"); + + if (!reader->open()) { + throw std::runtime_error("Cannot open pcap/pcapng file"); + } + + pcpp::RawPacket rawPacket; + while (reader->getNextPacket(rawPacket)) { + pcpp::Packet packet(&rawPacket, pcpp::OsiModelTransportLayer); + } + + reader->close(); + delete reader; + } +} +BENCHMARK(BM_pcapReadParse1); + +BENCHMARK_MAIN(); \ No newline at end of file diff --git a/src/handlers/dns/tests/test_dns.cpp b/src/handlers/dns/tests/test_dns.cpp index d0ad9086c..9ce59cb88 100644 --- 
a/src/handlers/dns/tests/test_dns.cpp +++ b/src/handlers/dns/tests/test_dns.cpp @@ -1,12 +1,10 @@ -#include #include -#include #include "dns.h" -using namespace vizer::handler::dns; +using namespace visor::handler::dns; -TEST_CASE("dns", "[dns]") +TEST_CASE("DNS Utilities", "[dns]") { SECTION("aggregateDomain") diff --git a/src/handlers/dns/tests/test_dns_layer.cpp b/src/handlers/dns/tests/test_dns_layer.cpp index 572f1daa8..690a49865 100644 --- a/src/handlers/dns/tests/test_dns_layer.cpp +++ b/src/handlers/dns/tests/test_dns_layer.cpp @@ -3,15 +3,15 @@ #include "DnsStreamHandler.h" #include "PcapInputStream.h" -using namespace vizer::handler::dns; -using namespace vizer::input::pcap; +using namespace visor::handler::dns; +using namespace visor::input::pcap; using namespace nlohmann; TEST_CASE("Parse DNS UDP IPv4 tests", "[pcap][ipv4][udp][dns]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_ipv4_udp.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_ipv4_udp.pcap"); stream.config_set("bpf", ""); DnsStreamHandler dns_handler{"dns-test", &stream, 1, 100}; @@ -22,7 +22,7 @@ TEST_CASE("Parse DNS UDP IPv4 tests", "[pcap][ipv4][udp][dns]") stream.stop(); auto counters = dns_handler.metrics()->bucket(0)->counters(); - auto event_data = dns_handler.metrics()->bucket(0)->event_data(); + auto event_data = dns_handler.metrics()->bucket(0)->event_data_locked(); CHECK(dns_handler.metrics()->current_periods() == 1); CHECK(dns_handler.metrics()->start_tstamp().tv_sec == 1567706414); @@ -32,17 +32,17 @@ TEST_CASE("Parse DNS UDP IPv4 tests", "[pcap][ipv4][udp][dns]") CHECK(dns_handler.metrics()->end_tstamp().tv_nsec == 602866000); CHECK(dns_handler.metrics()->bucket(0)->period_length() == 6); - + json j; dns_handler.metrics()->bucket(0)->to_json(j); CHECK(dns_handler.metrics()->current_periods() == 1); - CHECK(event_data.num_events == 140); - CHECK(counters.UDP == 140); - CHECK(counters.IPv4 == 140); - CHECK(counters.IPv6 == 0); - 
CHECK(counters.queries == 70); - CHECK(counters.replies == 70); + CHECK(event_data.num_events->value() == 140); + CHECK(counters.UDP.value() == 140); + CHECK(counters.IPv4.value() == 140); + CHECK(counters.IPv6.value() == 0); + CHECK(counters.queries.value() == 70); + CHECK(counters.replies.value() == 70); CHECK(j["top_qname2"][0]["name"] == ".test.com"); CHECK(j["top_qname2"][0]["estimate"] == 140); } @@ -50,7 +50,7 @@ TEST_CASE("Parse DNS UDP IPv4 tests", "[pcap][ipv4][udp][dns]") TEST_CASE("Parse DNS TCP IPv4 tests", "[pcap][ipv4][tcp][dns]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_ipv4_tcp.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_ipv4_tcp.pcap"); stream.config_set("bpf", ""); DnsStreamHandler dns_handler{"dns-test", &stream, 1, 100}; @@ -61,16 +61,16 @@ TEST_CASE("Parse DNS TCP IPv4 tests", "[pcap][ipv4][tcp][dns]") stream.stop(); auto counters = dns_handler.metrics()->bucket(0)->counters(); - auto event_data = dns_handler.metrics()->bucket(0)->event_data(); + auto event_data = dns_handler.metrics()->bucket(0)->event_data_locked(); json j; dns_handler.metrics()->bucket(0)->to_json(j); - CHECK(event_data.num_events == 420); - CHECK(counters.TCP == 420); - CHECK(counters.IPv4 == 420); - CHECK(counters.IPv6 == 0); - CHECK(counters.queries == 210); - CHECK(counters.replies == 210); + CHECK(event_data.num_events->value() == 420); + CHECK(counters.TCP.value() == 420); + CHECK(counters.IPv4.value() == 420); + CHECK(counters.IPv6.value() == 0); + CHECK(counters.queries.value() == 210); + CHECK(counters.replies.value() == 210); CHECK(j["top_qname2"][0]["name"] == ".test.com"); CHECK(j["top_qname2"][0]["estimate"] == 420); } @@ -79,7 +79,7 @@ TEST_CASE("Parse DNS UDP IPv6 tests", "[pcap][ipv6][udp][dns]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_ipv6_udp.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_ipv6_udp.pcap"); stream.config_set("bpf", ""); 
DnsStreamHandler dns_handler{"dns-test", &stream, 1, 100}; @@ -90,16 +90,16 @@ TEST_CASE("Parse DNS UDP IPv6 tests", "[pcap][ipv6][udp][dns]") dns_handler.stop(); auto counters = dns_handler.metrics()->bucket(0)->counters(); - auto event_data = dns_handler.metrics()->bucket(0)->event_data(); + auto event_data = dns_handler.metrics()->bucket(0)->event_data_locked(); json j; dns_handler.metrics()->bucket(0)->to_json(j); - CHECK(event_data.num_events == 140); - CHECK(counters.UDP == 140); - CHECK(counters.IPv4 == 0); - CHECK(counters.IPv6 == 140); - CHECK(counters.queries == 70); - CHECK(counters.replies == 70); + CHECK(event_data.num_events->value() == 140); + CHECK(counters.UDP.value() == 140); + CHECK(counters.IPv4.value() == 0); + CHECK(counters.IPv6.value() == 140); + CHECK(counters.queries.value() == 70); + CHECK(counters.replies.value() == 70); CHECK(j["top_qname2"][0]["name"] == ".test.com"); CHECK(j["top_qname2"][0]["estimate"] == 140); } @@ -108,7 +108,7 @@ TEST_CASE("Parse DNS TCP IPv6 tests", "[pcap][ipv6][tcp][dns]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_ipv6_tcp.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_ipv6_tcp.pcap"); stream.config_set("bpf", ""); DnsStreamHandler dns_handler{"dns-test", &stream, 1, 100}; @@ -119,16 +119,16 @@ TEST_CASE("Parse DNS TCP IPv6 tests", "[pcap][ipv6][tcp][dns]") dns_handler.stop(); auto counters = dns_handler.metrics()->bucket(0)->counters(); - auto event_data = dns_handler.metrics()->bucket(0)->event_data(); + auto event_data = dns_handler.metrics()->bucket(0)->event_data_locked(); json j; dns_handler.metrics()->bucket(0)->to_json(j); - CHECK(event_data.num_events == 360); - CHECK(counters.TCP == 360); - CHECK(counters.IPv4 == 0); - CHECK(counters.IPv6 == 360); - CHECK(counters.queries == 180); - CHECK(counters.replies == 180); + CHECK(event_data.num_events->value() == 360); + CHECK(counters.TCP.value() == 360); + CHECK(counters.IPv4.value() == 0); + 
CHECK(counters.IPv6.value() == 360); + CHECK(counters.queries.value() == 180); + CHECK(counters.replies.value() == 180); CHECK(j["top_qname2"][0]["name"] == ".test.com"); CHECK(j["top_qname2"][0]["estimate"] == 360); } @@ -137,7 +137,7 @@ TEST_CASE("Parse DNS random UDP/TCP tests", "[pcap][net]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_udp_tcp_random.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_udp_tcp_random.pcap"); stream.config_set("bpf", ""); stream.config_set("host_spec", "192.168.0.0/24"); stream.parse_host_spec(); @@ -150,27 +150,27 @@ TEST_CASE("Parse DNS random UDP/TCP tests", "[pcap][net]") dns_handler.stop(); auto counters = dns_handler.metrics()->bucket(0)->counters(); - auto event_data = dns_handler.metrics()->bucket(0)->event_data(); + auto event_data = dns_handler.metrics()->bucket(0)->event_data_locked(); // confirmed with wireshark. there are 14 TCP retransmissions which are counted differently in our state machine // and account for some minor differences in TCP based stats - CHECK(event_data.num_events == 5851); // wireshark: 5838 - CHECK(event_data.num_samples == 5851); - CHECK(counters.TCP == 2880); // wireshark: 2867 - CHECK(counters.UDP == 2971); - CHECK(counters.IPv4 == 5851); // wireshark: 5838 - CHECK(counters.IPv6 == 0); - CHECK(counters.queries == 2930); - CHECK(counters.replies == 2921); // wireshark: 2908 - CHECK(counters.xacts_total == 2921); // wireshark: 2894 - CHECK(counters.xacts_in == 0); - CHECK(counters.xacts_out == 2921); // wireshark: 2894 - CHECK(counters.xacts_timed_out == 0); - CHECK(counters.NOERROR == 2921); // wireshark: 5838 (we only count reply result codes) - CHECK(counters.NOERROR == 2921); // wireshark: 5838 (we only count reply result codes) - CHECK(counters.NX == 0); - CHECK(counters.REFUSED == 0); - CHECK(counters.SRVFAIL == 0); + CHECK(event_data.num_events->value() == 5851); // wireshark: 5838 + CHECK(event_data.num_samples->value() == 5851); + 
CHECK(counters.TCP.value() == 2880); // wireshark: 2867 + CHECK(counters.UDP.value() == 2971); + CHECK(counters.IPv4.value() == 5851); // wireshark: 5838 + CHECK(counters.IPv6.value() == 0); + CHECK(counters.queries.value() == 2930); + CHECK(counters.replies.value() == 2921); // wireshark: 2908 + CHECK(counters.xacts_total.value() == 2921); // wireshark: 2894 + CHECK(counters.xacts_in.value() == 0); + CHECK(counters.xacts_out.value() == 2921); // wireshark: 2894 + CHECK(counters.xacts_timed_out.value() == 0); + CHECK(counters.NOERROR.value() == 2921); // wireshark: 5838 (we only count reply result codes) + CHECK(counters.NOERROR.value() == 2921); // wireshark: 5838 (we only count reply result codes) + CHECK(counters.NX.value() == 0); + CHECK(counters.REFUSED.value() == 0); + CHECK(counters.SRVFAIL.value() == 0); nlohmann::json j; dns_handler.metrics()->bucket(0)->to_json(j); @@ -178,10 +178,10 @@ TEST_CASE("Parse DNS random UDP/TCP tests", "[pcap][net]") CHECK(j["cardinality"]["qname"] == 2055); // flame was run with 1000 randoms x2 (udp+tcp) CHECK(j["top_qname2"][0]["name"] == ".test.com"); - CHECK(j["top_qname2"][0]["estimate"] == event_data.num_events); + CHECK(j["top_qname2"][0]["estimate"] == event_data.num_events->value()); CHECK(j["top_rcode"][0]["name"] == "NOERROR"); - CHECK(j["top_rcode"][0]["estimate"] == counters.NOERROR); + CHECK(j["top_rcode"][0]["estimate"] == counters.NOERROR.value()); CHECK(j["top_udp_ports"][0]["name"] == "57975"); CHECK(j["top_udp_ports"][0]["estimate"] == 302); diff --git a/src/handlers/dns/tests/test_json_schema.cpp b/src/handlers/dns/tests/test_json_schema.cpp new file mode 100644 index 000000000..fdd0023b8 --- /dev/null +++ b/src/handlers/dns/tests/test_json_schema.cpp @@ -0,0 +1,61 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ + +#include +#include +#include +#include +#include + +#include "DnsStreamHandler.h" +#include "PcapInputStream.h" + +using namespace visor::handler::dns; +using namespace visor::input::pcap; +using namespace nlohmann; +using nlohmann::json_schema::json_validator; + +TEST_CASE("DNS JSON Schema", "[dns][iface][json]") +{ + + SECTION("json iface") + { + PcapInputStream stream{"pcap-test"}; + stream.config_set("pcap_file", "tests/fixtures/dns_udp_tcp_random.pcap"); + stream.config_set("bpf", ""); + stream.config_set("host_spec", "192.168.0.0/24"); + stream.parse_host_spec(); + + DnsStreamHandler dns_handler{"dns-test", &stream, 5, 100}; + dns_handler.config_set("recorded_stream", true); + + dns_handler.start(); + stream.start(); + stream.stop(); + dns_handler.stop(); + + json dns_json; + dns_handler.metrics()->window_merged_json(dns_json, dns_handler.schema_key(), 5); + + std::ifstream sfile("handlers/dns/tests/window-schema.json"); + CHECK(sfile.is_open()); + std::string schema; + + sfile.seekg(0, std::ios::end); + schema.reserve(sfile.tellg()); + sfile.seekg(0, std::ios::beg); + + schema.assign((std::istreambuf_iterator(sfile)), std::istreambuf_iterator()); + json_validator validator; + + auto schema_json = json::parse(schema); + + try { + validator.set_root_schema(schema_json); + validator.validate(dns_json); + } catch (const std::exception &e) { + FAIL(e.what()); + } + } +} diff --git a/src/handlers/dns/tests/window-schema.json b/src/handlers/dns/tests/window-schema.json new file mode 100644 index 000000000..b29c759e1 --- /dev/null +++ b/src/handlers/dns/tests/window-schema.json @@ -0,0 +1,1402 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "$id": "http://example.com/example.json", + "type": "object", + "title": "The root schema", + "description": "The root schema comprises the entire JSON document.", + "default": {}, + "examples": [ + { + "5m": { + "dns": { + "cardinality": { + "qname": 2048 + }, + "period": { + "length": 31, + "start_ts": 
1614874231 + }, + "top_nxdomain": [], + "top_qname2": [ + { + "estimate": 5851, + "name": ".test.com" + } + ], + "top_qname3": [ + { + "estimate": 8, + "name": ".as.test.com" + }, + { + "estimate": 8, + "name": ".jk.test.com" + }, + { + "estimate": 8, + "name": ".M6.test.com" + }, + { + "estimate": 6, + "name": ".AG.test.com" + }, + { + "estimate": 4, + "name": "Lg.test.com" + }, + { + "estimate": 4, + "name": ".LDyZ.test.com" + }, + { + "estimate": 4, + "name": "_GbQ8x.test.com" + }, + { + "estimate": 4, + "name": "3ZI6Sv.test.com" + }, + { + "estimate": 4, + "name": ".0E5QKD.test.com" + }, + { + "estimate": 4, + "name": "oK5y8IB.test.com" + } + ], + "top_qtype": [ + { + "estimate": 1476, + "name": "AAAA" + }, + { + "estimate": 825, + "name": "CNAME" + }, + { + "estimate": 794, + "name": "SOA" + }, + { + "estimate": 757, + "name": "MX" + }, + { + "estimate": 717, + "name": "A" + }, + { + "estimate": 662, + "name": "NS" + }, + { + "estimate": 620, + "name": "TXT" + } + ], + "top_rcode": [ + { + "estimate": 2921, + "name": "NOERROR" + } + ], + "top_refused": [], + "top_srvfail": [], + "top_udp_ports": [ + { + "estimate": 302, + "name": "57975" + }, + { + "estimate": 298, + "name": "64406" + }, + { + "estimate": 298, + "name": "59371" + }, + { + "estimate": 298, + "name": "54916" + }, + { + "estimate": 298, + "name": "63354" + }, + { + "estimate": 298, + "name": "56323" + }, + { + "estimate": 298, + "name": "64767" + }, + { + "estimate": 298, + "name": "49867" + }, + { + "estimate": 298, + "name": "53476" + }, + { + "estimate": 289, + "name": "52301" + } + ], + "wire_packets": { + "deep_samples": 5851, + "ipv4": 5851, + "ipv6": 0, + "noerror": 2921, + "nxdomain": 0, + "queries": 2930, + "refused": 0, + "replies": 2921, + "srvfail": 0, + "tcp": 2880, + "total": 5851, + "udp": 2971 + }, + "xact": { + "counts": { + "timed_out": 0, + "total": 2921 + }, + "in": { + "top_slow": [], + "total": 0 + }, + "out": { + "quantiles_us": { + "p50": 31582, + "p90": 41599, + "p95": 
65418, + "p99": 325152 + }, + "top_slow": [], + "total": 2921 + } + } + } + } + } + ], + "required": [ + "5m" + ], + "properties": { + "5m": { + "$id": "#/properties/5m", + "type": "object", + "title": "The 5m schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "dns": { + "cardinality": { + "qname": 2048 + }, + "period": { + "length": 31, + "start_ts": 1614874231 + }, + "top_nxdomain": [], + "top_qname2": [ + { + "estimate": 5851, + "name": ".test.com" + } + ], + "top_qname3": [ + { + "estimate": 8, + "name": ".as.test.com" + }, + { + "estimate": 8, + "name": ".jk.test.com" + }, + { + "estimate": 8, + "name": ".M6.test.com" + }, + { + "estimate": 6, + "name": ".AG.test.com" + }, + { + "estimate": 4, + "name": "Lg.test.com" + }, + { + "estimate": 4, + "name": ".LDyZ.test.com" + }, + { + "estimate": 4, + "name": "_GbQ8x.test.com" + }, + { + "estimate": 4, + "name": "3ZI6Sv.test.com" + }, + { + "estimate": 4, + "name": ".0E5QKD.test.com" + }, + { + "estimate": 4, + "name": "oK5y8IB.test.com" + } + ], + "top_qtype": [ + { + "estimate": 1476, + "name": "AAAA" + }, + { + "estimate": 825, + "name": "CNAME" + }, + { + "estimate": 794, + "name": "SOA" + }, + { + "estimate": 757, + "name": "MX" + }, + { + "estimate": 717, + "name": "A" + }, + { + "estimate": 662, + "name": "NS" + }, + { + "estimate": 620, + "name": "TXT" + } + ], + "top_rcode": [ + { + "estimate": 2921, + "name": "NOERROR" + } + ], + "top_refused": [], + "top_srvfail": [], + "top_udp_ports": [ + { + "estimate": 302, + "name": "57975" + }, + { + "estimate": 298, + "name": "64406" + }, + { + "estimate": 298, + "name": "59371" + }, + { + "estimate": 298, + "name": "54916" + }, + { + "estimate": 298, + "name": "63354" + }, + { + "estimate": 298, + "name": "56323" + }, + { + "estimate": 298, + "name": "64767" + }, + { + "estimate": 298, + "name": "49867" + }, + { + "estimate": 298, + "name": "53476" + }, + { + "estimate": 289, + "name": "52301" + 
} + ], + "wire_packets": { + "deep_samples": 5851, + "ipv4": 5851, + "ipv6": 0, + "noerror": 2921, + "nxdomain": 0, + "queries": 2930, + "refused": 0, + "replies": 2921, + "srvfail": 0, + "tcp": 2880, + "total": 5851, + "udp": 2971 + }, + "xact": { + "counts": { + "timed_out": 0, + "total": 2921 + }, + "in": { + "top_slow": [], + "total": 0 + }, + "out": { + "quantiles_us": { + "p50": 31582, + "p90": 41599, + "p95": 65418, + "p99": 325152 + }, + "top_slow": [], + "total": 2921 + } + } + } + } + ], + "required": [ + "dns" + ], + "properties": { + "dns": { + "$id": "#/properties/5m/properties/dns", + "type": "object", + "title": "The dns schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "cardinality": { + "qname": 2048 + }, + "period": { + "length": 31, + "start_ts": 1614874231 + }, + "top_nxdomain": [], + "top_qname2": [ + { + "estimate": 5851, + "name": ".test.com" + } + ], + "top_qname3": [ + { + "estimate": 8, + "name": ".as.test.com" + }, + { + "estimate": 8, + "name": ".jk.test.com" + }, + { + "estimate": 8, + "name": ".M6.test.com" + }, + { + "estimate": 6, + "name": ".AG.test.com" + }, + { + "estimate": 4, + "name": "Lg.test.com" + }, + { + "estimate": 4, + "name": ".LDyZ.test.com" + }, + { + "estimate": 4, + "name": "_GbQ8x.test.com" + }, + { + "estimate": 4, + "name": "3ZI6Sv.test.com" + }, + { + "estimate": 4, + "name": ".0E5QKD.test.com" + }, + { + "estimate": 4, + "name": "oK5y8IB.test.com" + } + ], + "top_qtype": [ + { + "estimate": 1476, + "name": "AAAA" + }, + { + "estimate": 825, + "name": "CNAME" + }, + { + "estimate": 794, + "name": "SOA" + }, + { + "estimate": 757, + "name": "MX" + }, + { + "estimate": 717, + "name": "A" + }, + { + "estimate": 662, + "name": "NS" + }, + { + "estimate": 620, + "name": "TXT" + } + ], + "top_rcode": [ + { + "estimate": 2921, + "name": "NOERROR" + } + ], + "top_refused": [], + "top_srvfail": [], + "top_udp_ports": [ + { + "estimate": 302, + "name": 
"57975" + }, + { + "estimate": 298, + "name": "64406" + }, + { + "estimate": 298, + "name": "59371" + }, + { + "estimate": 298, + "name": "54916" + }, + { + "estimate": 298, + "name": "63354" + }, + { + "estimate": 298, + "name": "56323" + }, + { + "estimate": 298, + "name": "64767" + }, + { + "estimate": 298, + "name": "49867" + }, + { + "estimate": 298, + "name": "53476" + }, + { + "estimate": 289, + "name": "52301" + } + ], + "wire_packets": { + "deep_samples": 5851, + "ipv4": 5851, + "ipv6": 0, + "noerror": 2921, + "nxdomain": 0, + "queries": 2930, + "refused": 0, + "replies": 2921, + "srvfail": 0, + "tcp": 2880, + "total": 5851, + "udp": 2971 + }, + "xact": { + "counts": { + "timed_out": 0, + "total": 2921 + }, + "in": { + "top_slow": [], + "total": 0 + }, + "out": { + "quantiles_us": { + "p50": 31582, + "p90": 41599, + "p95": 65418, + "p99": 325152 + }, + "top_slow": [], + "total": 2921 + } + } + } + ], + "required": [ + "cardinality", + "period", + "top_nxdomain", + "top_qname2", + "top_qname3", + "top_qtype", + "top_rcode", + "top_refused", + "top_srvfail", + "top_udp_ports", + "wire_packets", + "xact" + ], + "properties": { + "cardinality": { + "$id": "#/properties/5m/properties/dns/properties/cardinality", + "type": "object", + "title": "The cardinality schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "qname": 2048 + } + ], + "required": [ + "qname" + ], + "properties": { + "qname": { + "$id": "#/properties/5m/properties/dns/properties/cardinality/properties/qname", + "type": "integer", + "title": "The qname schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2048 + ] + } + }, + "additionalProperties": false + }, + "period": { + "$id": "#/properties/5m/properties/dns/properties/period", + "type": "object", + "title": "The period schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + 
"examples": [ + { + "length": 31, + "start_ts": 1614874231 + } + ], + "required": [ + "length", + "start_ts" + ], + "properties": { + "length": { + "$id": "#/properties/5m/properties/dns/properties/period/properties/length", + "type": "integer", + "title": "The length schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 31 + ] + }, + "start_ts": { + "$id": "#/properties/5m/properties/dns/properties/period/properties/start_ts", + "type": "integer", + "title": "The start_ts schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1614874231 + ] + } + }, + "additionalProperties": false + }, + "top_nxdomain": { + "$id": "#/properties/5m/properties/dns/properties/top_nxdomain", + "type": "array", + "title": "The top_nxdomain schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_nxdomain/items" + } + }, + "top_qname2": { + "$id": "#/properties/5m/properties/dns/properties/top_qname2", + "type": "array", + "title": "The top_qname2 schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 5851, + "name": ".test.com" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_qname2/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_qname2/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 5851, + "name": ".test.com" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_qname2/items/anyOf/0/properties/estimate", + 
"type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 5851 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_qname2/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + ".test.com" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_qname3": { + "$id": "#/properties/5m/properties/dns/properties/top_qname3", + "type": "array", + "title": "The top_qname3 schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 8, + "name": ".as.test.com" + }, + { + "estimate": 8, + "name": ".jk.test.com" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_qname3/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_qname3/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 8, + "name": ".as.test.com" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_qname3/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 8 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_qname3/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + ".as.test.com" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_qtype": { + 
"$id": "#/properties/5m/properties/dns/properties/top_qtype", + "type": "array", + "title": "The top_qtype schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 1476, + "name": "AAAA" + }, + { + "estimate": 825, + "name": "CNAME" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_qtype/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_qtype/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 1476, + "name": "AAAA" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_qtype/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1476 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_qtype/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "AAAA" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_rcode": { + "$id": "#/properties/5m/properties/dns/properties/top_rcode", + "type": "array", + "title": "The top_rcode schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 2921, + "name": "NOERROR" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_rcode/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_rcode/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": 
"An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 2921, + "name": "NOERROR" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_rcode/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2921 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_rcode/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "NOERROR" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_refused": { + "$id": "#/properties/5m/properties/dns/properties/top_refused", + "type": "array", + "title": "The top_refused schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_refused/items" + } + }, + "top_srvfail": { + "$id": "#/properties/5m/properties/dns/properties/top_srvfail", + "type": "array", + "title": "The top_srvfail schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/top_srvfail/items" + } + }, + "top_udp_ports": { + "$id": "#/properties/5m/properties/dns/properties/top_udp_ports", + "type": "array", + "title": "The top_udp_ports schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 302, + "name": "57975" + }, + { + "estimate": 298, + "name": "64406" + } + ] + ], + "additionalItems": true, + "items": { + "$id": 
"#/properties/5m/properties/dns/properties/top_udp_ports/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/dns/properties/top_udp_ports/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 302, + "name": "57975" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/dns/properties/top_udp_ports/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 302 + ] + }, + "name": { + "$id": "#/properties/5m/properties/dns/properties/top_udp_ports/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "57975" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "wire_packets": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets", + "type": "object", + "title": "The wire_packets schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "deep_samples": 5851, + "ipv4": 5851, + "ipv6": 0, + "noerror": 2921, + "nxdomain": 0, + "queries": 2930, + "refused": 0, + "replies": 2921, + "srvfail": 0, + "tcp": 2880, + "total": 5851, + "udp": 2971 + } + ], + "required": [ + "deep_samples", + "ipv4", + "ipv6", + "noerror", + "nxdomain", + "queries", + "refused", + "replies", + "srvfail", + "tcp", + "total", + "udp" + ], + "properties": { + "deep_samples": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/deep_samples", + "type": "integer", + "title": "The deep_samples schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 5851 + ] 
+ }, + "ipv4": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/ipv4", + "type": "integer", + "title": "The ipv4 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 5851 + ] + }, + "ipv6": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/ipv6", + "type": "integer", + "title": "The ipv6 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 0 + ] + }, + "noerror": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/noerror", + "type": "integer", + "title": "The noerror schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2921 + ] + }, + "nxdomain": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/nxdomain", + "type": "integer", + "title": "The nxdomain schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 0 + ] + }, + "queries": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/queries", + "type": "integer", + "title": "The queries schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2930 + ] + }, + "refused": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/refused", + "type": "integer", + "title": "The refused schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 0 + ] + }, + "replies": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/replies", + "type": "integer", + "title": "The replies schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2921 + ] + }, + "srvfail": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/srvfail", 
+ "type": "integer", + "title": "The srvfail schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 0 + ] + }, + "tcp": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/tcp", + "type": "integer", + "title": "The tcp schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2880 + ] + }, + "total": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/total", + "type": "integer", + "title": "The total schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 5851 + ] + }, + "udp": { + "$id": "#/properties/5m/properties/dns/properties/wire_packets/properties/udp", + "type": "integer", + "title": "The udp schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2971 + ] + } + }, + "additionalProperties": false + }, + "xact": { + "$id": "#/properties/5m/properties/dns/properties/xact", + "type": "object", + "title": "The xact schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "counts": { + "timed_out": 0, + "total": 2921 + }, + "in": { + "top_slow": [], + "total": 0 + }, + "out": { + "quantiles_us": { + "p50": 31582, + "p90": 41599, + "p95": 65418, + "p99": 325152 + }, + "top_slow": [], + "total": 2921 + } + } + ], + "required": [ + "counts", + "in", + "out" + ], + "properties": { + "counts": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/counts", + "type": "object", + "title": "The counts schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "timed_out": 0, + "total": 2921 + } + ], + "required": [ + "timed_out", + "total" + ], + "properties": { + "timed_out": { + "$id": 
"#/properties/5m/properties/dns/properties/xact/properties/counts/properties/timed_out", + "type": "integer", + "title": "The timed_out schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 0 + ] + }, + "total": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/counts/properties/total", + "type": "integer", + "title": "The total schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2921 + ] + } + }, + "additionalProperties": false + }, + "in": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in", + "type": "object", + "title": "The in schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "top_slow": [], + "total": 0 + } + ], + "required": [ + "top_slow", + "total" + ], + "properties": { + "top_slow": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/top_slow", + "type": "array", + "title": "The top_slow schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/top_slow/items" + } + }, + "total": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/in/properties/total", + "type": "integer", + "title": "The total schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 0 + ] + } + }, + "additionalProperties": false + }, + "out": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out", + "type": "object", + "title": "The out schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "quantiles_us": { + "p50": 31582, + "p90": 41599, + "p95": 65418, + "p99": 325152 + }, + 
"top_slow": [], + "total": 2921 + } + ], + "required": [ + "quantiles_us", + "top_slow", + "total" + ], + "properties": { + "quantiles_us": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us", + "type": "object", + "title": "The quantiles_us schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "p50": 31582, + "p90": 41599, + "p95": 65418, + "p99": 325152 + } + ], + "required": [ + "p50", + "p90", + "p95", + "p99" + ], + "properties": { + "p50": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us/properties/p50", + "type": "integer", + "title": "The p50 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 31582 + ] + }, + "p90": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us/properties/p90", + "type": "integer", + "title": "The p90 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 41599 + ] + }, + "p95": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us/properties/p95", + "type": "integer", + "title": "The p95 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 65418 + ] + }, + "p99": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/quantiles_us/properties/p99", + "type": "integer", + "title": "The p99 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 325152 + ] + } + }, + "additionalProperties": false + }, + "top_slow": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/top_slow", + "type": "array", + "title": "The top_slow schema", + "description": "An explanation about the purpose of this 
instance.", + "default": [], + "examples": [ + [] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/top_slow/items" + } + }, + "total": { + "$id": "#/properties/5m/properties/dns/properties/xact/properties/out/properties/total", + "type": "integer", + "title": "The total schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2921 + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false +} \ No newline at end of file diff --git a/src/handlers/net/CMakeLists.txt b/src/handlers/net/CMakeLists.txt index 27b1f41b5..47ffaf285 100644 --- a/src/handlers/net/CMakeLists.txt +++ b/src/handlers/net/CMakeLists.txt @@ -2,34 +2,23 @@ message(STATUS "Handler Module: Net") set_directory_properties(PROPERTIES CORRADE_USE_PEDANTIC_FLAGS ON) -corrade_add_static_plugin(VizerHandlerNet +corrade_add_static_plugin(VisorHandlerNet ${CMAKE_CURRENT_BINARY_DIR} NetHandler.conf NetHandlerModulePlugin.cpp NetStreamHandler.cpp) -add_library(Vizer::Handler::Net ALIAS VizerHandlerNet) +add_library(Visor::Handler::Net ALIAS VisorHandlerNet) -target_include_directories(VizerHandlerNet +target_include_directories(VisorHandlerNet INTERFACE $ ) -target_link_libraries(VizerHandlerNet +target_link_libraries(VisorHandlerNet PUBLIC - Vizer::Input::Pcap + Visor::Input::Pcap ) -set(VIZER_STATIC_PLUGINS ${VIZER_STATIC_PLUGINS} Vizer::Handler::Net PARENT_SCOPE) +set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} Visor::Handler::Net PARENT_SCOPE) -## TEST SUITE -add_executable(unit-tests-handler-net - tests/main.cpp - tests/test_net_layer.cpp - ) - -target_link_libraries(unit-tests-handler-net PRIVATE Vizer::Handler::Net) - -add_test(NAME unit-tests-handler-net - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/tests - COMMAND 
unit-tests-handler-net - ) \ No newline at end of file +add_subdirectory(tests) \ No newline at end of file diff --git a/src/handlers/net/NetHandlerModulePlugin.cpp b/src/handlers/net/NetHandlerModulePlugin.cpp index 828959e5b..66f93be53 100644 --- a/src/handlers/net/NetHandlerModulePlugin.cpp +++ b/src/handlers/net/NetHandlerModulePlugin.cpp @@ -8,12 +8,12 @@ #include #include -CORRADE_PLUGIN_REGISTER(VizerHandlerNet, vizer::handler::net::NetHandlerModulePlugin, - "dev.vizer.module.handler/1.0") +CORRADE_PLUGIN_REGISTER(VisorHandlerNet, visor::handler::net::NetHandlerModulePlugin, + "dev.visor.module.handler/1.0") -namespace vizer::handler::net { +namespace visor::handler::net { -using namespace vizer::input::pcap; +using namespace visor::input::pcap; using json = nlohmann::json; void NetHandlerModulePlugin::_setup_routes(HttpServer &svr) diff --git a/src/handlers/net/NetHandlerModulePlugin.h b/src/handlers/net/NetHandlerModulePlugin.h index 452d74f77..6344f5b9d 100644 --- a/src/handlers/net/NetHandlerModulePlugin.h +++ b/src/handlers/net/NetHandlerModulePlugin.h @@ -7,7 +7,7 @@ #include "HandlerModulePlugin.h" -namespace vizer::handler::net { +namespace visor::handler::net { class NetHandlerModulePlugin : public HandlerModulePlugin { @@ -17,7 +17,7 @@ class NetHandlerModulePlugin : public HandlerModulePlugin public: explicit NetHandlerModulePlugin(Corrade::PluginManager::AbstractManager &manager, const std::string &plugin) - : vizer::HandlerModulePlugin{manager, plugin} + : visor::HandlerModulePlugin{manager, plugin} { } diff --git a/src/handlers/net/NetStreamHandler.cpp b/src/handlers/net/NetStreamHandler.cpp index 5319811d4..acb29b8e2 100644 --- a/src/handlers/net/NetStreamHandler.cpp +++ b/src/handlers/net/NetStreamHandler.cpp @@ -17,10 +17,10 @@ #include #include -namespace vizer::handler::net { +namespace visor::handler::net { NetStreamHandler::NetStreamHandler(const std::string &name, PcapInputStream *stream, uint periods, uint deepSampleRate) - : 
vizer::StreamMetricsHandler(name, periods, deepSampleRate) + : visor::StreamMetricsHandler(name, periods, deepSampleRate) , _stream(stream) { assert(stream); @@ -32,6 +32,10 @@ void NetStreamHandler::start() return; } + if (config_exists("recorded_stream")) { + _metrics->set_recorded_stream(); + } + _pkt_connection = _stream->packet_signal.connect(&NetStreamHandler::process_packet_cb, this); _start_tstamp_connection = _stream->start_tstamp_signal.connect(&NetStreamHandler::set_start_tstamp, this); _end_tstamp_connection = _stream->end_tstamp_signal.connect(&NetStreamHandler::set_end_tstamp, this); @@ -82,6 +86,14 @@ void NetStreamHandler::info_json(json &j) const { _common_info_json(j); } +void NetStreamHandler::window_prometheus(std::stringstream &out) +{ + if (_metrics->current_periods() > 1) { + _metrics->window_single_prometheus(out, 1); + } else { + _metrics->window_single_prometheus(out, 0); + } +} void NetworkMetricsBucket::specialized_merge(const AbstractMetricsBucket &o) { @@ -103,15 +115,8 @@ void NetworkMetricsBucket::specialized_merge(const AbstractMetricsBucket &o) _counters.total_in += other._counters.total_in; _counters.total_out += other._counters.total_out; - datasketches::cpc_union merge_srcIPCard; - merge_srcIPCard.update(_srcIPCard); - merge_srcIPCard.update(other._srcIPCard); - _srcIPCard = merge_srcIPCard.get_result(); - - datasketches::cpc_union merge_dstIPCard; - merge_dstIPCard.update(_dstIPCard); - merge_dstIPCard.update(other._dstIPCard); - _dstIPCard = merge_dstIPCard.get_result(); + _srcIPCard.merge(other._srcIPCard); + _dstIPCard.merge(other._dstIPCard); _topIPv4.merge(other._topIPv4); _topIPv6.merge(other._topIPv6); @@ -119,106 +124,68 @@ void NetworkMetricsBucket::specialized_merge(const AbstractMetricsBucket &o) _topASN.merge(other._topASN); } -void NetworkMetricsBucket::to_json(json &j) const +void NetworkMetricsBucket::to_prometheus(std::stringstream &out) const { - const double fractions[4]{0.50, 0.90, 0.95, 0.99}; + 
_rate_in.to_prometheus(out); + _rate_out.to_prometheus(out); - // do rates first, which handle their own locking - { - if (!read_only()) { - j["rates"]["pps_in"]["live"] = _rate_in.rate(); - } - auto [rate_quantile, rate_lock] = _rate_in.quantile_get_rlocked(); - auto quantiles = rate_quantile->get_quantiles(fractions, 4); - if (quantiles.size()) { - j["rates"]["pps_in"]["p50"] = quantiles[0]; - j["rates"]["pps_in"]["p90"] = quantiles[1]; - j["rates"]["pps_in"]["p95"] = quantiles[2]; - j["rates"]["pps_in"]["p99"] = quantiles[3]; - } - } + auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - { - if (!read_only()) { - j["rates"]["pps_out"]["live"] = _rate_out.rate(); - } - auto [rate_quantile, rate_lock] = _rate_out.quantile_get_rlocked(); - auto quantiles = rate_quantile->get_quantiles(fractions, 4); - if (quantiles.size()) { - j["rates"]["pps_out"]["p50"] = quantiles[0]; - j["rates"]["pps_out"]["p90"] = quantiles[1]; - j["rates"]["pps_out"]["p95"] = quantiles[2]; - j["rates"]["pps_out"]["p99"] = quantiles[3]; - } - } + event_rate->to_prometheus(out); + num_events->to_prometheus(out); + num_samples->to_prometheus(out); - auto [num_events, num_samples, event_rate] = event_data(); // thread safe + std::shared_lock r_lock(_mutex); - { - if (!read_only()) { - j["rates"]["pps_total"]["live"] = event_rate->rate(); - } - auto [rate_quantile, rate_lock] = event_rate->quantile_get_rlocked(); - auto quantiles = rate_quantile->get_quantiles(fractions, 4); - if (quantiles.size()) { - j["rates"]["pps_total"]["p50"] = quantiles[0]; - j["rates"]["pps_total"]["p90"] = quantiles[1]; - j["rates"]["pps_total"]["p95"] = quantiles[2]; - j["rates"]["pps_total"]["p99"] = quantiles[3]; - } - } + _counters.UDP.to_prometheus(out); + _counters.TCP.to_prometheus(out); + _counters.OtherL4.to_prometheus(out); + _counters.IPv4.to_prometheus(out); + _counters.IPv6.to_prometheus(out); + _counters.total_in.to_prometheus(out); + 
_counters.total_out.to_prometheus(out); + + _srcIPCard.to_prometheus(out); + _dstIPCard.to_prometheus(out); + + _topIPv4.to_prometheus(out, [](const uint32_t &val) { return pcpp::IPv4Address(val).toString(); }); + _topIPv6.to_prometheus(out); + _topGeoLoc.to_prometheus(out); + _topASN.to_prometheus(out); +} - std::shared_lock r_lock(_mutex); +void NetworkMetricsBucket::to_json(json &j) const +{ - j["total"] = num_events; - j["deep_samples"] = num_samples; - j["udp"] = _counters.UDP; - j["tcp"] = _counters.TCP; - j["other_l4"] = _counters.OtherL4; - j["ipv4"] = _counters.IPv4; - j["ipv6"] = _counters.IPv6; - j["in"] = _counters.total_in; - j["out"] = _counters.total_out; - - j["cardinality"]["src_ips_in"] = lround(_srcIPCard.get_estimate()); - j["cardinality"]["dst_ips_out"] = lround(_dstIPCard.get_estimate()); - - { - j["top_ipv4"] = nlohmann::json::array(); - auto items = _topIPv4.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["top_ipv4"][i]["name"] = pcpp::IPv4Address(items[i].get_item()).toString(); - j["top_ipv4"][i]["estimate"] = items[i].get_estimate(); - } - } + // do rates first, which handle their own locking + bool live_rates = !read_only() && !recorded_stream(); + _rate_in.to_json(j, live_rates); + _rate_out.to_json(j, live_rates); - { - j["top_ipv6"] = nlohmann::json::array(); - auto items = _topIPv6.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["top_ipv6"][i]["name"] = items[i].get_item(); - j["top_ipv6"][i]["estimate"] = items[i].get_estimate(); - } - } + auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - { - j["top_geoLoc"] = nlohmann::json::array(); - auto items = _topGeoLoc.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, 
items.size()); i++) { - j["top_geoLoc"][i]["name"] = items[i].get_item(); - j["top_geoLoc"][i]["estimate"] = items[i].get_estimate(); - } - } + event_rate->to_json(j, live_rates); + num_events->to_json(j); + num_samples->to_json(j); - { - j["top_ASN"] = nlohmann::json::array(); - auto items = _topASN.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - for (uint64_t i = 0; i < std::min(10UL, items.size()); i++) { - j["top_ASN"][i]["name"] = items[i].get_item(); - j["top_ASN"][i]["estimate"] = items[i].get_estimate(); - } - } + std::shared_lock r_lock(_mutex); + + _counters.UDP.to_json(j); + _counters.TCP.to_json(j); + _counters.OtherL4.to_json(j); + _counters.IPv4.to_json(j); + _counters.IPv6.to_json(j); + _counters.total_in.to_json(j); + _counters.total_out.to_json(j); + + _srcIPCard.to_json(j); + _dstIPCard.to_json(j); + + _topIPv4.to_json(j, [](const uint32_t &val) { return pcpp::IPv4Address(val).toString(); }); + _topIPv6.to_json(j); + _topGeoLoc.to_json(j); + _topASN.to_json(j); } // the main bucket analysis @@ -274,10 +241,10 @@ void NetworkMetricsBucket::process_packet(bool deep, pcpp::Packet &payload, Pack auto IP6layer = payload.getLayerOfType(); if (IP4layer) { if (dir == PacketDirection::toHost) { - _srcIPCard.update(IP4layer->getSrcIpAddress().toInt()); - _topIPv4.update(IP4layer->getSrcIpAddress().toInt()); + _srcIPCard.update(IP4layer->getSrcIPv4Address().toInt()); + _topIPv4.update(IP4layer->getSrcIPv4Address().toInt()); if (geo::enabled()) { - if (IPv4tosockaddr(IP4layer->getSrcIpAddress(), &sa4)) { + if (IPv4tosockaddr(IP4layer->getSrcIPv4Address(), &sa4)) { if (geo::GeoIP().enabled()) { _topGeoLoc.update(geo::GeoIP().getGeoLocString(reinterpret_cast(&sa4))); } @@ -287,10 +254,10 @@ void NetworkMetricsBucket::process_packet(bool deep, pcpp::Packet &payload, Pack } } } else if (dir == PacketDirection::fromHost) { - _dstIPCard.update(IP4layer->getDstIpAddress().toInt()); - 
_topIPv4.update(IP4layer->getDstIpAddress().toInt()); + _dstIPCard.update(IP4layer->getDstIPv4Address().toInt()); + _topIPv4.update(IP4layer->getDstIPv4Address().toInt()); if (geo::enabled()) { - if (IPv4tosockaddr(IP4layer->getDstIpAddress(), &sa4)) { + if (IPv4tosockaddr(IP4layer->getDstIPv4Address(), &sa4)) { if (geo::GeoIP().enabled()) { _topGeoLoc.update(geo::GeoIP().getGeoLocString(reinterpret_cast(&sa4))); } @@ -302,10 +269,10 @@ void NetworkMetricsBucket::process_packet(bool deep, pcpp::Packet &payload, Pack } } else if (IP6layer) { if (dir == PacketDirection::toHost) { - _srcIPCard.update(reinterpret_cast(IP6layer->getSrcIpAddress().toBytes()), 16); - _topIPv6.update(IP6layer->getSrcIpAddress().toString()); + _srcIPCard.update(reinterpret_cast(IP6layer->getSrcIPv6Address().toBytes()), 16); + _topIPv6.update(IP6layer->getSrcIPv6Address().toString()); if (geo::enabled()) { - if (IPv6tosockaddr(IP6layer->getSrcIpAddress(), &sa6)) { + if (IPv6tosockaddr(IP6layer->getSrcIPv6Address(), &sa6)) { if (geo::GeoIP().enabled()) { _topGeoLoc.update(geo::GeoIP().getGeoLocString(reinterpret_cast(&sa6))); } @@ -315,10 +282,10 @@ void NetworkMetricsBucket::process_packet(bool deep, pcpp::Packet &payload, Pack } } } else if (dir == PacketDirection::fromHost) { - _dstIPCard.update(reinterpret_cast(IP6layer->getDstIpAddress().toBytes()), 16); - _topIPv6.update(IP6layer->getDstIpAddress().toString()); + _dstIPCard.update(reinterpret_cast(IP6layer->getDstIPv6Address().toBytes()), 16); + _topIPv6.update(IP6layer->getDstIPv6Address().toString()); if (geo::enabled()) { - if (IPv6tosockaddr(IP6layer->getDstIpAddress(), &sa6)) { + if (IPv6tosockaddr(IP6layer->getDstIPv6Address(), &sa6)) { if (geo::GeoIP().enabled()) { _topGeoLoc.update(geo::GeoIP().getGeoLocString(reinterpret_cast(&sa6))); } diff --git a/src/handlers/net/NetStreamHandler.h b/src/handlers/net/NetStreamHandler.h index 31e2b5b0b..22761e317 100644 --- a/src/handlers/net/NetStreamHandler.h +++ 
b/src/handlers/net/NetStreamHandler.h @@ -4,51 +4,49 @@ #pragma once - #include "AbstractMetricsManager.h" #include "PcapInputStream.h" #include "StreamHandler.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wold-style-cast" -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma clang diagnostic ignored "-Wrange-loop-analysis" -#include -#include -#include -#pragma GCC diagnostic pop #include #include -namespace vizer::handler::net { +namespace visor::handler::net { -using namespace vizer::input::pcap; +using namespace visor::input::pcap; -class NetworkMetricsBucket final : public vizer::AbstractMetricsBucket +class NetworkMetricsBucket final : public visor::AbstractMetricsBucket { -public: - const uint8_t START_FI_MAP_SIZE = 7; // 2^7 = 128 - const uint8_t MAX_FI_MAP_SIZE = 13; // 2^13 = 8192 protected: mutable std::shared_mutex _mutex; - datasketches::cpc_sketch _srcIPCard; - datasketches::cpc_sketch _dstIPCard; + Cardinality _srcIPCard; + Cardinality _dstIPCard; - datasketches::frequent_items_sketch _topGeoLoc; - datasketches::frequent_items_sketch _topASN; - datasketches::frequent_items_sketch _topIPv4; - datasketches::frequent_items_sketch _topIPv6; // TODO OPTIMIZE not very efficient, should switch to 16 byte uint + TopN _topGeoLoc; + TopN _topASN; + TopN _topIPv4; + TopN _topIPv6; // total numPackets is tracked in base class num_events struct counters { - uint64_t UDP = 0; - uint64_t TCP = 0; - uint64_t OtherL4 = 0; - uint64_t IPv4 = 0; - uint64_t IPv6 = 0; - uint64_t total_in = 0; - uint64_t total_out = 0; + Counter UDP; + Counter TCP; + Counter OtherL4; + Counter IPv4; + Counter IPv6; + Counter total_in; + Counter total_out; + counters() + : UDP("packets", {"udp"}, "Count of UDP packets") + , TCP("packets", {"tcp"}, "Count of TCP packets") + , OtherL4("packets", {"other_l4"}, "Count of packets which are not UDP or TCP") + , IPv4("packets", {"ipv4"}, "Count of IPv4 packets") + , IPv6("packets", {"ipv6"}, "Count of IPv6 packets") + , 
total_in("packets", {"in"}, "Count of total ingress packets") + , total_out("packets", {"out"}, "Count of total egress packets") + { + } }; counters _counters; @@ -57,15 +55,18 @@ class NetworkMetricsBucket final : public vizer::AbstractMetricsBucket public: NetworkMetricsBucket() - : _srcIPCard() - , _dstIPCard() - , _topGeoLoc(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _topASN(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _topIPv4(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _topIPv6(MAX_FI_MAP_SIZE, START_FI_MAP_SIZE) - , _rate_in() - , _rate_out() + : _srcIPCard("packets", {"cardinality", "src_ips_in"}, "Source IP cardinality") + , _dstIPCard("packets", {"cardinality", "dst_ips_out"}, "Destination IP cardinality") + , _topGeoLoc("packets", {"top_geoLoc"}, "Top GeoIP locations") + , _topASN("packets", {"top_ASN"}, "Top ASNs by IP") + , _topIPv4("packets", {"top_ipv4"}, "Top IPv4 IP addresses") + , _topIPv6("packets", {"top_ipv6"}, "Top IPv6 IP addresses") + , _rate_in("packets", {"rates", "pps_in"}, "Rate of ingress in packets per second") + , _rate_out("packets", {"rates", "pps_out"}, "Rate of egress in packets per second") { + set_event_rate_info("packets", {"rates", "pps_total"}, "Rate of all packets (combined ingress and egress) in packets per second"); + set_num_events_info("packets", {"total"}, "Total packets processed"); + set_num_sample_info("packets", {"deep_samples"}, "Total packets that were sampled for deep inspection"); } // get a copy of the counters @@ -75,9 +76,10 @@ class NetworkMetricsBucket final : public vizer::AbstractMetricsBucket return _counters; } - // vizer::AbstractMetricsBucket + // visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other) override; void to_json(json &j) const override; + void to_prometheus(std::stringstream &out) const override; // must be thread safe as it is called from time window maintenance thread void on_set_read_only() override @@ -90,29 +92,18 @@ class NetworkMetricsBucket final : public 
vizer::AbstractMetricsBucket void process_packet(bool deep, pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4); }; -class NetworkMetricsManager final : public vizer::AbstractMetricsManager +class NetworkMetricsManager final : public visor::AbstractMetricsManager { public: NetworkMetricsManager(uint periods, int deepSampleRate) - : vizer::AbstractMetricsManager(periods, deepSampleRate) - { - } - -#if 0 - void on_period_shift() override - { - Corrade::Utility::Debug{} << "period shift"; - } - void on_period_evict(const NetworkMetricsBucket *bucket) override + : visor::AbstractMetricsManager(periods, deepSampleRate) { - Corrade::Utility::Debug{} << "evict: " << bucket->_numPackets; } -#endif void process_packet(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, timespec stamp); }; -class NetStreamHandler final : public vizer::StreamMetricsHandler +class NetStreamHandler final : public visor::StreamMetricsHandler { PcapInputStream *_stream; @@ -129,7 +120,7 @@ class NetStreamHandler final : public vizer::StreamMetricsHandler +#include +#include +#include +#include + +#include "NetStreamHandler.h" +#include "PcapInputStream.h" + +using namespace visor::handler::net; +using namespace visor::input::pcap; +using namespace nlohmann; +using nlohmann::json_schema::json_validator; + +TEST_CASE("Net JSON Schema", "[net][iface][json]") +{ + + SECTION("json iface") + { + + PcapInputStream stream{"pcap-test"}; + stream.config_set("pcap_file", "tests/fixtures/dns_udp_tcp_random.pcap"); + stream.config_set("bpf", ""); + stream.config_set("host_spec", "192.168.0.0/24"); + stream.parse_host_spec(); + + NetStreamHandler net_handler{"net-test", &stream, 5, 100}; + net_handler.config_set("recorded_stream", true); + + net_handler.start(); + stream.start(); + stream.stop(); + net_handler.stop(); + + json net_json; + net_handler.metrics()->window_merged_json(net_json, net_handler.schema_key(), 5); + std::ifstream 
sfile("handlers/net/tests/window-schema.json"); + CHECK(sfile.is_open()); + std::string schema; + + sfile.seekg(0, std::ios::end); + schema.reserve(sfile.tellg()); + sfile.seekg(0, std::ios::beg); + + schema.assign((std::istreambuf_iterator(sfile)), std::istreambuf_iterator()); + json_validator validator; + + auto schema_json = json::parse(schema); + + try { + validator.set_root_schema(schema_json); + validator.validate(net_json); + } catch (const std::exception &e) { + FAIL(e.what()); + } + } +} diff --git a/src/handlers/net/tests/test_net_layer.cpp b/src/handlers/net/tests/test_net_layer.cpp index 0fa637d9e..fd86da585 100644 --- a/src/handlers/net/tests/test_net_layer.cpp +++ b/src/handlers/net/tests/test_net_layer.cpp @@ -4,14 +4,14 @@ #include "NetStreamHandler.h" #include "PcapInputStream.h" -using namespace vizer::handler::net; -using namespace vizer::input::pcap; +using namespace visor::handler::net; +using namespace visor::input::pcap; TEST_CASE("Parse net (dns) UDP IPv4 tests", "[pcap][ipv4][udp][net]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_ipv4_udp.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_ipv4_udp.pcap"); stream.config_set("bpf", std::string()); NetStreamHandler net_handler{"net-test", &stream, 1, 100}; @@ -22,7 +22,7 @@ TEST_CASE("Parse net (dns) UDP IPv4 tests", "[pcap][ipv4][udp][net]") stream.stop(); auto counters = net_handler.metrics()->bucket(0)->counters(); - auto event_data = net_handler.metrics()->bucket(0)->event_data(); + auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); CHECK(net_handler.metrics()->current_periods() == 1); CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1567706414); @@ -33,16 +33,16 @@ TEST_CASE("Parse net (dns) UDP IPv4 tests", "[pcap][ipv4][udp][net]") CHECK(net_handler.metrics()->bucket(0)->period_length() == 6); - CHECK(event_data.num_events == 140); - CHECK(counters.UDP == 140); - CHECK(counters.IPv4 == 140); - 
CHECK(counters.IPv6 == 0); + CHECK(event_data.num_events->value() == 140); + CHECK(counters.UDP.value() == 140); + CHECK(counters.IPv4.value() == 140); + CHECK(counters.IPv6.value() == 0); } TEST_CASE("Parse net (dns) TCP IPv4 tests", "[pcap][ipv4][tcp][net]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_ipv4_tcp.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_ipv4_tcp.pcap"); stream.config_set("bpf", ""); NetStreamHandler net_handler{"net-test", &stream, 1, 100}; @@ -53,21 +53,21 @@ TEST_CASE("Parse net (dns) TCP IPv4 tests", "[pcap][ipv4][tcp][net]") stream.stop(); auto counters = net_handler.metrics()->bucket(0)->counters(); - auto event_data = net_handler.metrics()->bucket(0)->event_data(); + auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1567706433); CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 56403000); - CHECK(event_data.num_events == 2100); - CHECK(counters.TCP == 2100); - CHECK(counters.IPv4 == 2100); - CHECK(counters.IPv6 == 0); + CHECK(event_data.num_events->value() == 2100); + CHECK(counters.TCP.value() == 2100); + CHECK(counters.IPv4.value() == 2100); + CHECK(counters.IPv6.value() == 0); } TEST_CASE("Parse net (dns) UDP IPv6 tests", "[pcap][ipv6][udp][net]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_ipv6_udp.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_ipv6_udp.pcap"); stream.config_set("bpf", ""); NetStreamHandler net_handler{"net-test", &stream, 1, 100}; @@ -78,21 +78,21 @@ TEST_CASE("Parse net (dns) UDP IPv6 tests", "[pcap][ipv6][udp][net]") net_handler.stop(); auto counters = net_handler.metrics()->bucket(0)->counters(); - auto event_data = net_handler.metrics()->bucket(0)->event_data(); + auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1567706365); 
CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 513271000); - CHECK(event_data.num_events == 140); - CHECK(counters.UDP == 140); - CHECK(counters.IPv4 == 0); - CHECK(counters.IPv6 == 140); + CHECK(event_data.num_events->value() == 140); + CHECK(counters.UDP.value() == 140); + CHECK(counters.IPv4.value() == 0); + CHECK(counters.IPv6.value() == 140); } TEST_CASE("Parse net (dns) TCP IPv6 tests", "[pcap][ipv6][tcp][net]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_ipv6_tcp.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_ipv6_tcp.pcap"); stream.config_set("bpf", ""); NetStreamHandler net_handler{"net-test", &stream, 1, 100}; @@ -103,21 +103,21 @@ TEST_CASE("Parse net (dns) TCP IPv6 tests", "[pcap][ipv6][tcp][net]") net_handler.stop(); auto counters = net_handler.metrics()->bucket(0)->counters(); - auto event_data = net_handler.metrics()->bucket(0)->event_data(); + auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1567706308); CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 958184000); - CHECK(event_data.num_events == 1800); - CHECK(counters.TCP == 1800); - CHECK(counters.IPv4 == 0); - CHECK(counters.IPv6 == 1800); + CHECK(event_data.num_events->value() == 1800); + CHECK(counters.TCP.value() == 1800); + CHECK(counters.IPv4.value() == 0); + CHECK(counters.IPv6.value() == 1800); } TEST_CASE("Parse net (dns) random UDP/TCP tests", "[pcap][net]") { PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "fixtures/dns_udp_tcp_random.pcap"); + stream.config_set("pcap_file", "tests/fixtures/dns_udp_tcp_random.pcap"); stream.config_set("bpf", ""); stream.config_set("host_spec", "192.168.0.0/24"); stream.parse_host_spec(); @@ -130,24 +130,25 @@ TEST_CASE("Parse net (dns) random UDP/TCP tests", "[pcap][net]") net_handler.stop(); auto counters = net_handler.metrics()->bucket(0)->counters(); - auto event_data = 
net_handler.metrics()->bucket(0)->event_data(); + auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1614874231); CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 565771000); // confirmed with wireshark - CHECK(event_data.num_events == 16147); - CHECK(event_data.num_samples == 16147); - CHECK(counters.TCP == 13176); - CHECK(counters.UDP == 2971); - CHECK(counters.IPv4 == 16147); - CHECK(counters.IPv6 == 0); - CHECK(counters.OtherL4 == 0); - CHECK(counters.total_in == 6648); - CHECK(counters.total_out == 9499); + CHECK(event_data.num_events->value() == 16147); + CHECK(event_data.num_samples->value() == 16147); + CHECK(counters.TCP.value() == 13176); + CHECK(counters.UDP.value() == 2971); + CHECK(counters.IPv4.value() == 16147); + CHECK(counters.IPv6.value() == 0); + CHECK(counters.OtherL4.value() == 0); + CHECK(counters.total_in.value() == 6648); + CHECK(counters.total_out.value() == 9499); nlohmann::json j; net_handler.metrics()->bucket(0)->to_json(j); + CHECK(j["cardinality"]["dst_ips_out"] == 1); CHECK(j["cardinality"]["src_ips_in"] == 1); CHECK(j["top_ipv4"][0]["estimate"] == 16147); diff --git a/src/handlers/net/tests/window-schema.json b/src/handlers/net/tests/window-schema.json new file mode 100644 index 000000000..b0a3b3c96 --- /dev/null +++ b/src/handlers/net/tests/window-schema.json @@ -0,0 +1,423 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "$id": "http://example.com/example.json", + "type": "object", + "title": "The root schema", + "description": "The root schema comprises the entire JSON document.", + "default": {}, + "examples": [ + { + "5m": { + "packets": { + "cardinality": { + "dst_ips_out": 1, + "src_ips_in": 1 + }, + "deep_samples": 16147, + "in": 6648, + "ipv4": 16147, + "ipv6": 0, + "other_l4": 0, + "out": 9499, + "period": { + "length": 31, + "start_ts": 1614874231 + }, + "tcp": 13176, + "top_ASN": [], + "top_geoLoc": [], + "top_ipv4": [ + { + 
"estimate": 16147, + "name": "8.8.8.8" + } + ], + "top_ipv6": [], + "total": 16147, + "udp": 2971 + } + } + } + ], + "required": [ + "5m" + ], + "properties": { + "5m": { + "$id": "#/properties/5m", + "type": "object", + "title": "The 5m schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "packets": { + "cardinality": { + "dst_ips_out": 1, + "src_ips_in": 1 + }, + "deep_samples": 16147, + "in": 6648, + "ipv4": 16147, + "ipv6": 0, + "other_l4": 0, + "out": 9499, + "period": { + "length": 31, + "start_ts": 1614874231 + }, + "tcp": 13176, + "top_ASN": [], + "top_geoLoc": [], + "top_ipv4": [ + { + "estimate": 16147, + "name": "8.8.8.8" + } + ], + "top_ipv6": [], + "total": 16147, + "udp": 2971 + } + } + ], + "required": [ + "packets" + ], + "properties": { + "packets": { + "$id": "#/properties/5m/properties/packets", + "type": "object", + "title": "The packets schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "cardinality": { + "dst_ips_out": 1, + "src_ips_in": 1 + }, + "deep_samples": 16147, + "in": 6648, + "ipv4": 16147, + "ipv6": 0, + "other_l4": 0, + "out": 9499, + "period": { + "length": 31, + "start_ts": 1614874231 + }, + "tcp": 13176, + "top_ASN": [], + "top_geoLoc": [], + "top_ipv4": [ + { + "estimate": 16147, + "name": "8.8.8.8" + } + ], + "top_ipv6": [], + "total": 16147, + "udp": 2971 + } + ], + "required": [ + "cardinality", + "deep_samples", + "in", + "ipv4", + "ipv6", + "other_l4", + "out", + "period", + "tcp", + "top_ASN", + "top_geoLoc", + "top_ipv4", + "top_ipv6", + "total", + "udp" + ], + "properties": { + "cardinality": { + "$id": "#/properties/5m/properties/packets/properties/cardinality", + "type": "object", + "title": "The cardinality schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "dst_ips_out": 1, + "src_ips_in": 1 + } + ], + "required": [ + 
"dst_ips_out", + "src_ips_in" + ], + "properties": { + "dst_ips_out": { + "$id": "#/properties/5m/properties/packets/properties/cardinality/properties/dst_ips_out", + "type": "integer", + "title": "The dst_ips_out schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1 + ] + }, + "src_ips_in": { + "$id": "#/properties/5m/properties/packets/properties/cardinality/properties/src_ips_in", + "type": "integer", + "title": "The src_ips_in schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1 + ] + } + }, + "additionalProperties": false + }, + "deep_samples": { + "$id": "#/properties/5m/properties/packets/properties/deep_samples", + "type": "integer", + "title": "The deep_samples schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 16147 + ] + }, + "in": { + "$id": "#/properties/5m/properties/packets/properties/in", + "type": "integer", + "title": "The in schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 6648 + ] + }, + "ipv4": { + "$id": "#/properties/5m/properties/packets/properties/ipv4", + "type": "integer", + "title": "The ipv4 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 16147 + ] + }, + "ipv6": { + "$id": "#/properties/5m/properties/packets/properties/ipv6", + "type": "integer", + "title": "The ipv6 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 0 + ] + }, + "other_l4": { + "$id": "#/properties/5m/properties/packets/properties/other_l4", + "type": "integer", + "title": "The other_l4 schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 0 + ] + }, + "out": { + "$id": "#/properties/5m/properties/packets/properties/out", + "type": 
"integer", + "title": "The out schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 9499 + ] + }, + "period": { + "$id": "#/properties/5m/properties/packets/properties/period", + "type": "object", + "title": "The period schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "length": 31, + "start_ts": 1614874231 + } + ], + "required": [ + "length", + "start_ts" + ], + "properties": { + "length": { + "$id": "#/properties/5m/properties/packets/properties/period/properties/length", + "type": "integer", + "title": "The length schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 31 + ] + }, + "start_ts": { + "$id": "#/properties/5m/properties/packets/properties/period/properties/start_ts", + "type": "integer", + "title": "The start_ts schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 1614874231 + ] + } + }, + "additionalProperties": false + }, + "tcp": { + "$id": "#/properties/5m/properties/packets/properties/tcp", + "type": "integer", + "title": "The tcp schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 13176 + ] + }, + "top_ASN": { + "$id": "#/properties/5m/properties/packets/properties/top_ASN", + "type": "array", + "title": "The top_ASN schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/packets/properties/top_ASN/items" + } + }, + "top_geoLoc": { + "$id": "#/properties/5m/properties/packets/properties/top_geoLoc", + "type": "array", + "title": "The top_geoLoc schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [] + ], + "additionalItems": true, + 
"items": { + "$id": "#/properties/5m/properties/packets/properties/top_geoLoc/items" + } + }, + "top_ipv4": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4", + "type": "array", + "title": "The top_ipv4 schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [ + { + "estimate": 16147, + "name": "8.8.8.8" + } + ] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4/items", + "anyOf": [ + { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4/items/anyOf/0", + "type": "object", + "title": "The first anyOf schema", + "description": "An explanation about the purpose of this instance.", + "default": {}, + "examples": [ + { + "estimate": 16147, + "name": "8.8.8.8" + } + ], + "required": [ + "estimate", + "name" + ], + "properties": { + "estimate": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4/items/anyOf/0/properties/estimate", + "type": "integer", + "title": "The estimate schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 16147 + ] + }, + "name": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv4/items/anyOf/0/properties/name", + "type": "string", + "title": "The name schema", + "description": "An explanation about the purpose of this instance.", + "default": "", + "examples": [ + "8.8.8.8" + ] + } + }, + "additionalProperties": false + } + ] + } + }, + "top_ipv6": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv6", + "type": "array", + "title": "The top_ipv6 schema", + "description": "An explanation about the purpose of this instance.", + "default": [], + "examples": [ + [] + ], + "additionalItems": true, + "items": { + "$id": "#/properties/5m/properties/packets/properties/top_ipv6/items" + } + }, + "total": { + "$id": "#/properties/5m/properties/packets/properties/total", + "type": "integer", + "title": "The 
total schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 16147 + ] + }, + "udp": { + "$id": "#/properties/5m/properties/packets/properties/udp", + "type": "integer", + "title": "The udp schema", + "description": "An explanation about the purpose of this instance.", + "default": 0, + "examples": [ + 2971 + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false +} \ No newline at end of file diff --git a/src/handlers/static_plugins.h b/src/handlers/static_plugins.h index 6e231349a..617da13ab 100644 --- a/src/handlers/static_plugins.h +++ b/src/handlers/static_plugins.h @@ -1,8 +1,8 @@ int import_handler_plugins() { - CORRADE_PLUGIN_IMPORT(VizerHandlerNet); - CORRADE_PLUGIN_IMPORT(VizerHandlerDns); + CORRADE_PLUGIN_IMPORT(VisorHandlerNet); + CORRADE_PLUGIN_IMPORT(VisorHandlerDns); return 0; } diff --git a/src/inputs/CMakeLists.txt b/src/inputs/CMakeLists.txt index 82ab0ad62..8e4e62dfd 100644 --- a/src/inputs/CMakeLists.txt +++ b/src/inputs/CMakeLists.txt @@ -1,4 +1,4 @@ add_subdirectory(pcap) -set(VIZER_STATIC_PLUGINS ${VIZER_STATIC_PLUGINS} PARENT_SCOPE) +set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} PARENT_SCOPE) diff --git a/src/inputs/README.md b/src/inputs/README.md new file mode 100644 index 000000000..788911322 --- /dev/null +++ b/src/inputs/README.md @@ -0,0 +1,10 @@ +# Stream Input Modules + +This directory contains the built-in stream input modules. These are designed to tap into high throughput, information +dense data streams and expose events to [stream handlers](/src/handlers) so that they may extract and summarize +important information. 
+ +See the individual READMEs for more information: + +* [Packet Capture](pcap/) + diff --git a/src/inputs/pcap/CMakeLists.txt b/src/inputs/pcap/CMakeLists.txt index 5b68e39f1..9f9967ca9 100644 --- a/src/inputs/pcap/CMakeLists.txt +++ b/src/inputs/pcap/CMakeLists.txt @@ -1,47 +1,29 @@ message(STATUS "Input Module: Pcap") -pkg_check_modules(LIBPCPP REQUIRED PcapPlusPlus) - set_directory_properties(PROPERTIES CORRADE_USE_PEDANTIC_FLAGS ON) -corrade_add_static_plugin(VizerInputPcap ${CMAKE_CURRENT_BINARY_DIR} +corrade_add_static_plugin(VisorInputPcap ${CMAKE_CURRENT_BINARY_DIR} PcapInput.conf PcapInputModulePlugin.cpp PcapInputStream.cpp afpacket.cpp utils.cpp ) -add_library(Vizer::Input::Pcap ALIAS VizerInputPcap) +add_library(Visor::Input::Pcap ALIAS VisorInputPcap) -target_include_directories(VizerInputPcap - PUBLIC - ${LIBPCPP_INCLUDE_DIRS} +target_include_directories(VisorInputPcap INTERFACE $ ) -if (APPLE) - target_link_libraries(VizerInputPcap - PRIVATE - "-L${LIBPCPP_LIBRARY_DIRS}" - ${LIBPCPP_LIBRARIES} - "-framework CoreFoundation" - "-framework SystemConfiguration" - ) -else () - target_link_libraries(VizerInputPcap - PRIVATE - ${LIBPCPP_LDFLAGS} - ${LIBPCPP_LIBRARIES} - ) -endif () - -target_link_libraries(VizerInputPcap +target_link_libraries(VisorInputPcap PUBLIC - Vizer::Core + ${CONAN_LIBS_PCAPPLUSPLUS} + ${CONAN_LIBS_LIBPCAP} + Visor::Core ) -set(VIZER_STATIC_PLUGINS ${VIZER_STATIC_PLUGINS} Vizer::Input::Pcap PARENT_SCOPE) +set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} Visor::Input::Pcap PARENT_SCOPE) ## TEST SUITE add_executable(unit-tests-input-pcap @@ -51,10 +33,10 @@ add_executable(unit-tests-input-pcap ) target_link_libraries(unit-tests-input-pcap - PRIVATE Vizer::Input::Pcap + PRIVATE Visor::Input::Pcap ) add_test(NAME unit-tests-input-pcap - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/tests + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src COMMAND unit-tests-input-pcap ) diff --git a/src/inputs/pcap/PcapInputModulePlugin.cpp 
b/src/inputs/pcap/PcapInputModulePlugin.cpp index 98cec7f6e..b718f50f1 100644 --- a/src/inputs/pcap/PcapInputModulePlugin.cpp +++ b/src/inputs/pcap/PcapInputModulePlugin.cpp @@ -6,10 +6,10 @@ #include #include -CORRADE_PLUGIN_REGISTER(VizerInputPcap, vizer::input::pcap::PcapInputModulePlugin, - "dev.vizer.module.input/1.0") +CORRADE_PLUGIN_REGISTER(VisorInputPcap, visor::input::pcap::PcapInputModulePlugin, + "dev.visor.module.input/1.0") -namespace vizer::input::pcap { +namespace visor::input::pcap { void PcapInputModulePlugin::_setup_routes(HttpServer &svr) { diff --git a/src/inputs/pcap/PcapInputModulePlugin.h b/src/inputs/pcap/PcapInputModulePlugin.h index 795b080fd..4334a0d63 100644 --- a/src/inputs/pcap/PcapInputModulePlugin.h +++ b/src/inputs/pcap/PcapInputModulePlugin.h @@ -8,9 +8,9 @@ #include "InputModulePlugin.h" #include "PcapInputStream.h" -namespace vizer::input::pcap { +namespace visor::input::pcap { -class PcapInputModulePlugin : public vizer::InputModulePlugin +class PcapInputModulePlugin : public visor::InputModulePlugin { protected: @@ -22,7 +22,7 @@ class PcapInputModulePlugin : public vizer::InputModulePlugin public: explicit PcapInputModulePlugin(Corrade::PluginManager::AbstractManager &manager, const std::string &plugin) - : vizer::InputModulePlugin{manager, plugin} + : visor::InputModulePlugin{manager, plugin} { } diff --git a/src/inputs/pcap/PcapInputStream.cpp b/src/inputs/pcap/PcapInputStream.cpp index 9b3d0b2e4..5a7a01def 100644 --- a/src/inputs/pcap/PcapInputStream.cpp +++ b/src/inputs/pcap/PcapInputStream.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #pragma GCC diagnostic pop #include #include @@ -27,7 +28,7 @@ using namespace std::chrono; -namespace vizer::input::pcap { +namespace visor::input::pcap { // static callbacks for PcapPlusPlus static void _tcp_message_ready_cb(int8_t side, const pcpp::TcpStreamData &tcpData, void *cookie) @@ -54,19 +55,20 @@ static void _packet_arrives_cb(pcpp::RawPacket *rawPacket, 
[[maybe_unused]] pcpp stream->process_raw_packet(rawPacket); } -static void _pcap_stats_update([[maybe_unused]] pcpp::IPcapDevice::PcapStats &stats, [[maybe_unused]] void *cookie) +static void _pcap_stats_update([[maybe_unused]] pcpp::IPcapDevice::PcapStats& stats, [[maybe_unused]] void *cookie) { // auto stream = static_cast(cookie); // TODO expose this } PcapInputStream::PcapInputStream(const std::string &name) - : vizer::InputStream(name) + : visor::InputStream(name) , _pcapDevice(nullptr) , _tcp_reassembly(_tcp_message_ready_cb, this, _tcp_connection_start_cb, - _tcp_connection_end_cb) + _tcp_connection_end_cb, + {true, 5, 500, 50}) { } @@ -91,6 +93,10 @@ void PcapInputStream::start() return; } + if (config_exists("debug")) { + pcpp::LoggerPP::getInstance().setAllModlesToLogLevel(pcpp::LoggerPP::LogLevel::Debug); + } + // live capture assert(config_exists("iface")); assert(config_exists("bpf")); @@ -213,20 +219,20 @@ void PcapInputStream::process_raw_packet(pcpp::RawPacket *rawPacket) auto IP6layer = packet.getLayerOfType(); if (IP4layer) { for (auto &i : _hostIPv4) { - if (IP4layer->getDstIpAddress().matchSubnet(i.address, i.mask)) { + if (IP4layer->getDstIPv4Address().matchSubnet(i.address, i.mask)) { dir = PacketDirection::toHost; break; - } else if (IP4layer->getSrcIpAddress().matchSubnet(i.address, i.mask)) { + } else if (IP4layer->getSrcIPv4Address().matchSubnet(i.address, i.mask)) { dir = PacketDirection::fromHost; break; } } } else if (IP6layer) { for (auto &i : _hostIPv6) { - if (IP6layer->getDstIpAddress().matchSubnet(i.address, i.mask)) { + if (IP6layer->getDstIPv6Address().matchSubnet(i.address, i.mask)) { dir = PacketDirection::toHost; break; - } else if (IP6layer->getSrcIpAddress().matchSubnet(i.address, i.mask)) { + } else if (IP6layer->getSrcIPv6Address().matchSubnet(i.address, i.mask)) { dir = PacketDirection::fromHost; break; } @@ -254,8 +260,9 @@ void PcapInputStream::_open_pcap(const std::string &fileName, const std::string auto reader = 
pcpp::IFileReaderDevice::getReader(fileName.c_str()); // try to open the file device - if (!reader->open()) + if (!reader->open()) { throw PcapException("Cannot open pcap/pcapng file"); + } // set BPF filter if set by the user if (bpfFilter != "") { @@ -319,7 +326,14 @@ void PcapInputStream::_open_libpcap_iface(const std::string &bpfFilter) NOTE: the packet buffer timeout cannot be used to cause calls that read packets to return within a limited period of time, because, on some platforms, the packet buffer timeout isn't supported, and, on other platforms, the timer doesn't start until at least one packet arrives. This means that the packet buffer timeout should NOT be used, for example, in an interactive application to allow the packet capture loop to ``poll'' for user input periodically, as there's no guarantee that a call reading packets will return after the timeout expires even if no packets have arrived. The packet buffer timeout is set with pcap_set_timeout(). */ - config.packetBufferTimeoutMs = 100; + config.packetBufferTimeoutMs = 10; + /* + * @param[in] snapshotLength Snapshot length for capturing packets. Default value is 0 which means use the default value. + * A snapshot length of 262144 should be big enough for maximum-size Linux loopback packets (65549) and some USB packets + * captured with USBPcap (> 131072, < 262144). A snapshot length of 65535 should be sufficient, on most if not all networks, + * to capture all the data available from the packet. 
+ */ + config.snapshotLength = 1000; // try to open device if (!_pcapDevice->open(config)) { @@ -415,4 +429,4 @@ void PcapInputStream::parse_host_spec() } } -} \ No newline at end of file +} diff --git a/src/inputs/pcap/PcapInputStream.h b/src/inputs/pcap/PcapInputStream.h index 496735da8..53d9bef0c 100644 --- a/src/inputs/pcap/PcapInputStream.h +++ b/src/inputs/pcap/PcapInputStream.h @@ -23,7 +23,7 @@ #include "afpacket.h" #endif -namespace vizer::input::pcap { +namespace visor::input::pcap { enum class PcapSource { unknown, @@ -37,7 +37,7 @@ enum class PacketDirection { unknown }; -class PcapInputStream : public vizer::InputStream +class PcapInputStream : public visor::InputStream { private: @@ -72,7 +72,7 @@ class PcapInputStream : public vizer::InputStream PcapInputStream(const std::string &name); ~PcapInputStream(); - // vizer::AbstractModule + // visor::AbstractModule std::string schema_key() const override { return "pcap"; diff --git a/src/inputs/pcap/README.md b/src/inputs/pcap/README.md new file mode 100644 index 000000000..7386cd1b5 --- /dev/null +++ b/src/inputs/pcap/README.md @@ -0,0 +1,16 @@ +# Packet Capture Stream Input + +This directory contains the main packet capture stream input tap. + +It uses libpcap or AF_PACKET (Linux) to tap into ethernet interfaces and expose the following events: + +* Packet +* UDP Packet +* TCP connection start +* TCP message ready +* TCP connection end + +It supports tcpdump compatible bpf filter strings to limit events. + +libpcap library has a limitation that traffic may be captured only once per interface per process. AF_PACKET does not +have this limitation. 
\ No newline at end of file diff --git a/src/inputs/pcap/afpacket.cpp b/src/inputs/pcap/afpacket.cpp index 4b9644715..1e100efb5 100644 --- a/src/inputs/pcap/afpacket.cpp +++ b/src/inputs/pcap/afpacket.cpp @@ -19,7 +19,7 @@ #include #include -namespace vizer::input::pcap { +namespace visor::input::pcap { AFPacket::AFPacket(PcapInputStream *stream, pcpp::OnPacketArrivesCallback cb, std::string filter, std::string interface_name, @@ -76,16 +76,16 @@ void AFPacket::walk_block(struct block_desc *pbd) uint64_t bytes = 0; struct tpacket3_hdr *ppd; - ppd = reinterpret_cast(reinterpret_cast(pbd) + pbd->h1.offset_to_first_pkt); + ppd = (struct tpacket3_hdr *)((uint8_t *)pbd + pbd->h1.offset_to_first_pkt); for (i = 0; i < num_pkts; ++i) { bytes += ppd->tp_snaplen; - auto data_pointer = reinterpret_cast(pbd) + ppd->tp_mac; + auto data_pointer = (uint8_t *)ppd + ppd->tp_mac; pcpp::RawPacket packet(data_pointer, ppd->tp_snaplen, timespec{pbd->h1.ts_last_pkt.ts_sec, pbd->h1.ts_last_pkt.ts_nsec}, false, pcpp::LINKTYPE_ETHERNET); cb(&packet, nullptr, inputStream); - ppd = reinterpret_cast(reinterpret_cast(pbd) + ppd->tp_next_offset); + ppd = (struct tpacket3_hdr *)((uint8_t *)ppd + ppd->tp_next_offset); } } @@ -236,6 +236,8 @@ void AFPacket::start_capture() // Configure the packet socket. 
setup(); + running = true; + cap_thread = std::make_unique([this] { unsigned int current_block_num = 0; diff --git a/src/inputs/pcap/afpacket.h b/src/inputs/pcap/afpacket.h index 019beda0a..2858680ab 100644 --- a/src/inputs/pcap/afpacket.h +++ b/src/inputs/pcap/afpacket.h @@ -17,7 +17,7 @@ #include #include -namespace vizer::input::pcap { +namespace visor::input::pcap { const int physical_offset = TPACKET_ALIGN(sizeof(struct tpacket3_hdr)); @@ -82,4 +82,4 @@ class AFPacket final void filter_try_compile(const std::string &, struct sock_fprog *, int); -} // namespace vizer +} // namespace visor diff --git a/src/inputs/pcap/tests/test_parse_pcap.cpp b/src/inputs/pcap/tests/test_parse_pcap.cpp index 7e1240a65..09ff1986a 100644 --- a/src/inputs/pcap/tests/test_parse_pcap.cpp +++ b/src/inputs/pcap/tests/test_parse_pcap.cpp @@ -16,7 +16,7 @@ TEST_CASE("Top K Src Ports", "[pcap][ipv4][topk][dns][udp]") { - pcpp::IFileReaderDevice *reader = pcpp::IFileReaderDevice::getReader("fixtures/dns_ipv4_udp.pcap"); + pcpp::IFileReaderDevice *reader = pcpp::IFileReaderDevice::getReader("tests/fixtures/dns_ipv4_udp.pcap"); CHECK(reader->open()); diff --git a/src/inputs/pcap/tests/test_utils.cpp b/src/inputs/pcap/tests/test_utils.cpp index bc7191f29..30fc7de27 100644 --- a/src/inputs/pcap/tests/test_utils.cpp +++ b/src/inputs/pcap/tests/test_utils.cpp @@ -2,8 +2,8 @@ #include #include -using namespace vizer; -using namespace vizer::input::pcap; +using namespace visor; +using namespace visor::input::pcap; TEST_CASE("parseHostSpec", "[utils]") { diff --git a/src/inputs/pcap/utils.cpp b/src/inputs/pcap/utils.cpp index 69723478f..8467b39c2 100644 --- a/src/inputs/pcap/utils.cpp +++ b/src/inputs/pcap/utils.cpp @@ -9,7 +9,7 @@ #include #include -namespace vizer::input::pcap { +namespace visor::input::pcap { template static void split(const std::string &s, char delim, Out result) diff --git a/src/inputs/pcap/utils.h b/src/inputs/pcap/utils.h index bcaa45d8d..1e98b7b32 100644 --- 
a/src/inputs/pcap/utils.h +++ b/src/inputs/pcap/utils.h @@ -11,7 +11,7 @@ #include #include -namespace vizer::input::pcap { +namespace visor::input::pcap { class PcapException : public std::runtime_error { diff --git a/src/inputs/static_plugins.h b/src/inputs/static_plugins.h index 6eaf0cf04..f32a75be8 100644 --- a/src/inputs/static_plugins.h +++ b/src/inputs/static_plugins.h @@ -1,7 +1,7 @@ int import_input_plugins() { - CORRADE_PLUGIN_IMPORT(VizerInputPcap); + CORRADE_PLUGIN_IMPORT(VisorInputPcap); return 0; } diff --git a/src/tests/test_geoip.cpp b/src/tests/test_geoip.cpp index 4815995fa..b0196c76c 100644 --- a/src/tests/test_geoip.cpp +++ b/src/tests/test_geoip.cpp @@ -8,35 +8,35 @@ TEST_CASE("GeoIP", "[geoip]") SECTION("Geo enablement") { - CHECK(!vizer::geo::enabled()); - CHECK_THROWS(vizer::geo::GeoIP().enable("nonexistent.mmdb")); - CHECK(!vizer::geo::enabled()); - CHECK(vizer::geo::GeoIP().getGeoLocString("2a02:dac0::") == ""); - CHECK(vizer::geo::GeoASN().getASNString("2a02:dac0::") == ""); - CHECK_NOTHROW(vizer::geo::GeoIP().enable("fixtures/GeoIP2-City-Test.mmdb")); - CHECK(vizer::geo::enabled()); - CHECK_NOTHROW(vizer::geo::GeoASN().enable("fixtures/GeoIP2-ISP-Test.mmdb")); - CHECK(vizer::geo::enabled()); + CHECK(!visor::geo::enabled()); + CHECK_THROWS(visor::geo::GeoIP().enable("nonexistent.mmdb")); + CHECK(!visor::geo::enabled()); + CHECK(visor::geo::GeoIP().getGeoLocString("2a02:dac0::") == ""); + CHECK(visor::geo::GeoASN().getASNString("2a02:dac0::") == ""); + CHECK_NOTHROW(visor::geo::GeoIP().enable("tests/fixtures/GeoIP2-City-Test.mmdb")); + CHECK(visor::geo::enabled()); + CHECK_NOTHROW(visor::geo::GeoASN().enable("tests/fixtures/GeoIP2-ISP-Test.mmdb")); + CHECK(visor::geo::enabled()); } SECTION("basic Geo lookup") { - CHECK(vizer::geo::GeoIP().enabled()); - CHECK(vizer::geo::GeoIP().getGeoLocString("2a02:dac0::") == "EU/Russia"); - CHECK(vizer::geo::GeoIP().getGeoLocString("89.160.20.112") == "EU/Sweden/E/Linköping"); - 
CHECK(vizer::geo::GeoIP().getGeoLocString("216.160.83.56") == "NA/United States/WA/Milton"); + CHECK(visor::geo::GeoIP().enabled()); + CHECK(visor::geo::GeoIP().getGeoLocString("2a02:dac0::") == "EU/Russia"); + CHECK(visor::geo::GeoIP().getGeoLocString("89.160.20.112") == "EU/Sweden/E/Linköping"); + CHECK(visor::geo::GeoIP().getGeoLocString("216.160.83.56") == "NA/United States/WA/Milton"); } SECTION("basic ASN lookup") { - CHECK(vizer::geo::GeoASN().enabled()); - CHECK(vizer::geo::GeoASN().getASNString("1.128.0.0") == "1221/Telstra Pty Ltd"); + CHECK(visor::geo::GeoASN().enabled()); + CHECK(visor::geo::GeoASN().getASNString("1.128.0.0") == "1221/Telstra Pty Ltd"); } SECTION("basic unknown") { - CHECK(vizer::geo::GeoASN().enabled()); - CHECK(vizer::geo::GeoASN().getASNString("6.6.6.6") == "Unknown"); + CHECK(visor::geo::GeoASN().enabled()); + CHECK(visor::geo::GeoASN().getASNString("6.6.6.6") == "Unknown"); } SECTION("basic Geo lookup, socket") @@ -44,11 +44,11 @@ TEST_CASE("GeoIP", "[geoip]") struct sockaddr_in sa4; sa4.sin_family = AF_INET; inet_pton(AF_INET, "89.160.20.112", &sa4.sin_addr.s_addr); - CHECK(vizer::geo::GeoIP().getGeoLocString((struct sockaddr *)&sa4) == "EU/Sweden/E/Linköping"); + CHECK(visor::geo::GeoIP().getGeoLocString((struct sockaddr *)&sa4) == "EU/Sweden/E/Linköping"); struct sockaddr_in6 sa6; sa6.sin6_family = AF_INET6; inet_pton(AF_INET6, "2a02:dac0::", &sa6.sin6_addr); - CHECK(vizer::geo::GeoIP().getGeoLocString((struct sockaddr *)&sa6) == "EU/Russia"); + CHECK(visor::geo::GeoIP().getGeoLocString((struct sockaddr *)&sa6) == "EU/Russia"); } SECTION("basic ASN lookup, socket") @@ -56,10 +56,10 @@ TEST_CASE("GeoIP", "[geoip]") struct sockaddr_in sa4; sa4.sin_family = AF_INET; inet_pton(AF_INET, "1.128.0.0", &sa4.sin_addr.s_addr); - CHECK(vizer::geo::GeoASN().getASNString((struct sockaddr *)&sa4) == "1221/Telstra Pty Ltd"); + CHECK(visor::geo::GeoASN().getASNString((struct sockaddr *)&sa4) == "1221/Telstra Pty Ltd"); struct sockaddr_in6 sa6; 
sa6.sin6_family = AF_INET6; inet_pton(AF_INET6, "2401:8080::", &sa6.sin6_addr); - CHECK(vizer::geo::GeoASN().getASNString((struct sockaddr *)&sa6) == "237/Merit Network Inc."); + CHECK(visor::geo::GeoASN().getASNString((struct sockaddr *)&sa6) == "237/Merit Network Inc."); } } diff --git a/src/tests/test_metrics.cpp b/src/tests/test_metrics.cpp index 4be9d8644..8a2f661e8 100644 --- a/src/tests/test_metrics.cpp +++ b/src/tests/test_metrics.cpp @@ -1,7 +1,7 @@ #include "AbstractMetricsManager.h" #include -using namespace vizer; +using namespace visor; class TestMetricsBucket : public AbstractMetricsBucket { @@ -13,5 +13,17 @@ class TestMetricsManager : public AbstractMetricsManager TEST_CASE("metrics", "[metrics]") { - // TestMetricsManager metrics{1, 100}; + Counter c("root", {"test", "metric"}, "A test metric"); + + json j; + c.name_json_assign(j, 58); + CHECK(j["test"]["metric"] == 58); + ++c; + c.to_json(j["top"]); + CHECK(j["top"]["test"]["metric"] == 1); + + json j2; + Counter c2("root", {"test", "metric"}, "A test metric"); + c2.name_json_assign(j2, {"add"}, 60); + CHECK(j2["test"]["metric"]["add"] == 60); } diff --git a/src/vizer_config.h.in b/src/visor_config.h.in similarity index 69% rename from src/vizer_config.h.in rename to src/visor_config.h.in index e76752a3a..53412be0d 100644 --- a/src/vizer_config.h.in +++ b/src/visor_config.h.in @@ -4,5 +4,5 @@ #pragma once -#cmakedefine VIZER_VERSION_NUM "@VIZER_VERSION_NUM@" -#cmakedefine VIZER_VERSION "@VIZER_VERSION@" +#cmakedefine VISOR_VERSION_NUM "@VISOR_VERSION_NUM@" +#cmakedefine VISOR_VERSION "@VISOR_VERSION@"