diff --git a/.github/actions/build-cpp/Dockerfile b/.github/actions/build-cpp/Dockerfile index 44e1a8354..4abee63f9 100644 --- a/.github/actions/build-cpp/Dockerfile +++ b/.github/actions/build-cpp/Dockerfile @@ -1,10 +1,10 @@ FROM debian:bullseye-slim LABEL author="Everton Haise Taques " -LABEL maintainer="Orb Community" +LABEL maintainer="NS1 Labs" LABEL version="1.0.0" -ENV BUILD_DEPS "g++ cmake make git pkgconf jq python3-pip python3-setuptools ca-certificates libasan6 zip curl python wget" +ENV BUILD_DEPS "g++ cmake make git pkgconf jq python3-pip python3-setuptools ca-certificates libasan6 zip curl python" COPY ./entrypoint.sh /entrypoint.sh @@ -15,8 +15,8 @@ WORKDIR /pktvisor-src RUN apt-get update && \ apt-get upgrade --yes --force-yes && \ apt-get install --yes --force-yes --no-install-recommends ${BUILD_DEPS} && \ - pip3 install 'conan==1.59.0' --force-reinstall - + pip3 install conan + RUN chmod +x /entrypoint.sh ENTRYPOINT [ "/entrypoint.sh" ] diff --git a/.github/actions/build-cpp/action.yml b/.github/actions/build-cpp/action.yml index 8b7d858f4..8553b5303 100644 --- a/.github/actions/build-cpp/action.yml +++ b/.github/actions/build-cpp/action.yml @@ -1,33 +1,23 @@ name: 'docker' author: 'Everton Haise Taques ' -description: 'Orb Community' +description: 'NS1 Labs' inputs: context: description: "Docker build context" required: true default: "./" - - bugsplat_key: - description: "bugsplat key" + + symbol_url: + description: "symbol url" required: true default: "" - bugsplat_symbol_url: - description: "bugsplat symbol url" - required: true - default: "" - build_type: description: "build type" required: true default: "Debug" - bugsplat: - description: "bugsplat active" - required: true - default: "true" - asan: description: "asan" required: true diff --git a/.github/actions/build-cpp/entrypoint.sh b/.github/actions/build-cpp/entrypoint.sh index 18ad7f92b..11f7d8b3c 100644 --- a/.github/actions/build-cpp/entrypoint.sh +++ 
b/.github/actions/build-cpp/entrypoint.sh @@ -2,7 +2,7 @@ # function validateParams() { echo "========================= Checking parameters =========================" - [[ -z $INPUT_BUGSPLAT_SYMBOL_URL ]] && echo "Bugsplat symbol url is required" && exit 1 || echo "Bugsplat symbol url pŕesent" + [[ -z $INPUT_SYMBOL_URL ]] && echo "Backtrace symbol url is required" && exit 1 || echo "Backtrace symbol url present" } function build() { @@ -14,15 +14,12 @@ function build() { cp -rf /github/workspace/libs/ /pktvisor-src/libs/ cp -rf /github/workspace/docker/ /pktvisor-src/docker/ cp -rf /github/workspace/golang/ /pktvisor-src/golang/ - cp -rf /github/workspace/build/ /pktvisor-src/build/ cp -rf /github/workspace/integration_tests/ /pktvisor-src/integration_tests/ cp -rf /github/workspace/cmake/ /pktvisor-src/cmake/ cp -rf /github/workspace/CMakeLists.txt /pktvisor-src/ cp -rf /github/workspace/conanfile.txt /pktvisor-src/ mkdir /tmp/build cd /tmp/build - cp -rf /pktvisor-src/build/conan_home/ . 
- chmod -R 777 /tmp/build/conan_home/ conan profile new --detect default conan profile update settings.compiler.libcxx=libstdc++11 default conan config set general.revisions_enabled=1 @@ -30,36 +27,28 @@ function build() { make all -j 4 } -function move() { +function compact() { echo "========================= Compacting binary and copying =========================" cd /tmp/build + zip pktvisord.zip /tmp/build/bin/pktvisord cp -rf /tmp/build/bin/pktvisord /github/workspace/ strip -s /tmp/build/bin/crashpad_handler cp -rf /tmp/build/bin/crashpad_handler /github/workspace/ cp -rf /tmp/build/bin/pktvisor-reader /github/workspace/ cp -rf /tmp/build/VERSION /github/workspace/ - chmod -R 777 /tmp/build/conan_home/ - cp -rf /tmp/build/conan_home/ /github/workspace/build/ + #version for pktvisor-cli cp -rf /pktvisor-src/golang/pkg/client/version.go /github/workspace/version.go + #copy pktvisor custom iana port service names file cp -rf /pktvisor-src/src/tests/fixtures/pktvisor-port-service-names.csv /github/workspace/custom-iana.csv } -function publishToBugsplat() { - echo "========================= Publishing symbol to bugsplat =========================" +function publish() { + echo "========================= Publishing symbol to backtrace =========================" cd /tmp/build - if [ "$INPUT_BUGSPLAT" == "true" ]; then - wget https://github.com/orb-community/CrashpadTools/raw/main/linux/dump_syms - chmod a+x ./dump_syms - wget https://github.com/orb-community/CrashpadTools/raw/main/linux/symupload - chmod a+x ./symupload - ./dump_syms /github/workspace/pktvisord > pktvisor.sym - PKTVISOR_VERSION=$(cat VERSION) - ls -lha - ./symupload -k $INPUT_BUGSPLAT_KEY pktvisor.sym $INPUT_BUGSPLAT_SYMBOL_URL$PKTVISOR_VERSION 2>/dev/null - fi + curl --data-binary @pktvisord.zip -H "Expect: gzip" "${INPUT_SYMBOL_URL}" } validateParams build -move -publishToBugsplat +compact +publish diff --git a/.github/actions/build-go/Dockerfile b/.github/actions/build-go/Dockerfile index 
d213b8ace..9a2f5e5e5 100644 --- a/.github/actions/build-go/Dockerfile +++ b/.github/actions/build-go/Dockerfile @@ -1,7 +1,7 @@ FROM golang:latest LABEL author="Everton Haise Taques " -LABEL maintainer="netboxlabs" +LABEL maintainer="NS1 Labs" LABEL version="1.0.0" COPY ./entrypoint.sh /entrypoint.sh diff --git a/.github/actions/build-go/action.yml b/.github/actions/build-go/action.yml index bd55a35c0..8501d8e5c 100644 --- a/.github/actions/build-go/action.yml +++ b/.github/actions/build-go/action.yml @@ -12,18 +12,8 @@ inputs: description: "Dockerfile used to build the image" required: true default: "./Dockerfile" - - goos: - description: "OS for cross-build" - required: false - default: "linux" - - goarch: - description: "ARCH for cross-build" - required: false - default: "amd64" runs: using: 'docker' image: 'Dockerfile' - + \ No newline at end of file diff --git a/.github/actions/build-go/entrypoint.sh b/.github/actions/build-go/entrypoint.sh index 3039b2131..bd4054b2c 100644 --- a/.github/actions/build-go/entrypoint.sh +++ b/.github/actions/build-go/entrypoint.sh @@ -6,7 +6,7 @@ function build() { # Copying this from previous build (cpp) cp -rf ./version.go /src/pkg/client/version.go cd /src - GOOS=$INPUT_GOOS GOARCH=$INPUT_GOARCH go build -o pktvisor-cli cmd/pktvisor-cli/main.go + go build -o pktvisor-cli cmd/pktvisor-cli/main.go } function copy() { @@ -15,4 +15,4 @@ function copy() { } build -copy +copy \ No newline at end of file diff --git a/.github/hosted-runner/amd64/user_data.sh b/.github/hosted-runner/amd64/user_data.sh index ae7cf7786..c63dcb27f 100644 --- a/.github/hosted-runner/amd64/user_data.sh +++ b/.github/hosted-runner/amd64/user_data.sh @@ -21,6 +21,6 @@ chown ubuntu.ubuntu /actions-runner -R #extract git actions runner installer /bin/su -c "cd /actions-runner && tar xzf ./actions-runner-linux-x64-2.296.2.tar.gz" - ubuntu >> /home/ubuntu/user-data.log -/bin/su -c "cd /actions-runner && ./config.sh --unattended --url 
https://github.com/orb-community/pktvisor --token RUNNER_TOKEN --name AMD64_RUNNER --labels RUNNER_LABEL --work _work --runasservice" - ubuntu >> /home/ubuntu/user-data.log +/bin/su -c "cd /actions-runner && ./config.sh --unattended --url https://github.com/ns1labs/pktvisor --token RUNNER_TOKEN --name AMD64_RUNNER --labels RUNNER_LABEL --work _work --runasservice" - ubuntu >> /home/ubuntu/user-data.log /bin/su -c "cd /actions-runner && ./run.sh" - ubuntu >> /home/ubuntu/user-data.log diff --git a/.github/hosted-runner/arm32/user_data.sh b/.github/hosted-runner/arm32/user_data.sh index ae7cf7786..c63dcb27f 100644 --- a/.github/hosted-runner/arm32/user_data.sh +++ b/.github/hosted-runner/arm32/user_data.sh @@ -21,6 +21,6 @@ chown ubuntu.ubuntu /actions-runner -R #extract git actions runner installer /bin/su -c "cd /actions-runner && tar xzf ./actions-runner-linux-x64-2.296.2.tar.gz" - ubuntu >> /home/ubuntu/user-data.log -/bin/su -c "cd /actions-runner && ./config.sh --unattended --url https://github.com/orb-community/pktvisor --token RUNNER_TOKEN --name AMD64_RUNNER --labels RUNNER_LABEL --work _work --runasservice" - ubuntu >> /home/ubuntu/user-data.log +/bin/su -c "cd /actions-runner && ./config.sh --unattended --url https://github.com/ns1labs/pktvisor --token RUNNER_TOKEN --name AMD64_RUNNER --labels RUNNER_LABEL --work _work --runasservice" - ubuntu >> /home/ubuntu/user-data.log /bin/su -c "cd /actions-runner && ./run.sh" - ubuntu >> /home/ubuntu/user-data.log diff --git a/.github/hosted-runner/arm64/user_data.sh b/.github/hosted-runner/arm64/user_data.sh index a2ab366a7..449dcdb8a 100644 --- a/.github/hosted-runner/arm64/user_data.sh +++ b/.github/hosted-runner/arm64/user_data.sh @@ -21,6 +21,6 @@ chown ubuntu.ubuntu /actions-runner -R #extract git actions runner installer /bin/su -c "cd /actions-runner && tar xzf ./actions-runner-linux-arm64-2.294.0.tar.gz" - ubuntu >> /home/ubuntu/user-data.log -/bin/su -c "cd /actions-runner && ./config.sh --unattended --url 
https://github.com/orb-community/pktvisor --token RUNNER_TOKEN --name ARM64_RUNNER --labels RUNNER_LABEL --work _work --runasservice" - ubuntu >> /home/ubuntu/user-data.log +/bin/su -c "cd /actions-runner && ./config.sh --unattended --url https://github.com/ns1labs/pktvisor --token RUNNER_TOKEN --name ARM64_RUNNER --labels RUNNER_LABEL --work _work --runasservice" - ubuntu >> /home/ubuntu/user-data.log /bin/su -c "cd /actions-runner && ./run.sh" - ubuntu >> /home/ubuntu/user-data.log diff --git a/.github/workflows/build-develop.yml b/.github/workflows/build-develop.yml index e841417d2..fb31f4db1 100644 --- a/.github/workflows/build-develop.yml +++ b/.github/workflows/build-develop.yml @@ -20,13 +20,13 @@ env: jobs: unit-tests-mac: # The CMake configure and build commands are platform agnostic and should work equally - # well on Windows or Mac. You can convert this to a matrix build if you need + # well on Windows or Mac. You can convert this to a matrix build if you need # cross-platform coverage. # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix runs-on: macos-11 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Create Build Environment # Some projects don't allow in-source building, so create a separate build directory @@ -34,17 +34,7 @@ jobs: run: cmake -E make_directory ${{github.workspace}}/build - name: Get Conan - id: conan - uses: turtlebrowser/get-conan@main - with: - version: 1.59.0 - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}- + run: brew install conan - name: Configure CMake # Use a bash shell so we can use the same syntax for environment variable @@ -68,6 +58,12 @@ jobs: # Execute tests defined by the CMake configuration. 
# See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail run: ctest -C $BUILD_TYPE + + - name: Login to ns1labs conan + run: CONAN_USER_HOME=${{github.workspace}}/build/conan_home conan user -p ${{ secrets.CONAN_LABS_PASSWORD }} -r ns1labs ${{ secrets.CONAN_LABS_USERNAME }} + + - name: Cache conan packages + run: CONAN_USER_HOME=${{github.workspace}}/build/conan_home conan upload "*" --all -r ns1labs -c unit-tests-linux: # The CMake configure and build commands are platform agnostic and should work equally @@ -78,9 +74,8 @@ jobs: runs-on: ubuntu-latest outputs: version_number: ${{ env.VERSION }} - commit_hash: ${{ env.COMMIT }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Create Build Environment # Some projects don't allow in-source building, so create a separate build directory @@ -90,23 +85,13 @@ jobs: - name: Get Conan # You may pin to the exact commit or the version. # uses: turtlebrowser/get-conan@4dc7e6dd45c8b1e02e909979d7cfc5ebba6ddbe2 - id: conan - uses: turtlebrowser/get-conan@main - with: - version: 1.59.0 + uses: turtlebrowser/get-conan@v1.0 - name: linux package install run: | sudo apt-get update sudo apt-get install --yes --no-install-recommends jq - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}- - + - name: Configure CMake # Use a bash shell so we can use the same syntax for environment variable # access regardless of the host operating system @@ -122,7 +107,6 @@ jobs: run: | echo "VERSION=`cat ${{github.workspace}}/build/VERSION`" >> $GITHUB_ENV echo "::set-output name=version::$(cat ${{github.workspace}}/build/VERSION)" - echo "COMMIT=`git rev-parse --short HEAD`" >> $GITHUB_ENV - name: Build working-directory: ${{github.workspace}}/build @@ -137,7 +121,13 @@ jobs: # See 
https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail run: ctest -C $BUILD_TYPE - build-win64: + - name: Login to ns1labs conan + run: CONAN_USER_HOME=${{github.workspace}}/build/conan_home conan user -p ${{ secrets.CONAN_LABS_PASSWORD }} -r ns1labs ${{ secrets.CONAN_LABS_USERNAME }} + + - name: Cache conan packages + run: CONAN_USER_HOME=${{github.workspace}}/build/conan_home conan upload "*" --all -r ns1labs -c + + build-windows: # The CMake configure and build commands are platform agnostic and should work equally # well on Windows or Mac. You can convert this to a matrix build if you need # cross-platform coverage. @@ -147,7 +137,7 @@ jobs: outputs: version_number: ${{ env.VERSION }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Create Build Environment # Some projects don't allow in-source building, so create a separate build directory @@ -157,22 +147,7 @@ jobs: - name: Get Conan # You may pin to the exact commit or the version. # uses: turtlebrowser/get-conan@4dc7e6dd45c8b1e02e909979d7cfc5ebba6ddbe2 - id: conan - uses: turtlebrowser/get-conan@main - with: - version: 1.59.0 - - - name: Remove libpcap from conanfile - shell: bash - run: | - sed -i -e "s/libpcap.*//g" "${{github.workspace}}\conanfile.txt" - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}- + uses: turtlebrowser/get-conan@v1.0 - name: Configure CMake # Use a bash shell so we can use the same syntax for environment variable @@ -204,68 +179,18 @@ jobs: # # Execute tests defined by the CMake configuration. 
# # See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail # run: ctest -C $BUILD_TYPE - - - name: Debug artifacts - shell: pwsh - run: | - Get-ChildItem -Force ${{github.workspace}} - Get-ChildItem -Force ${{github.workspace}}\build - Get-ChildItem -Force ${{github.workspace}}\build\bin - - - name: Persist to workspace - uses: actions/upload-artifact@v2 - with: - name: windows-build - path: | - ${{github.workspace}}\build\bin\pktvisor-reader.exe - ${{github.workspace}}\build\bin\pktvisord.exe - ${{github.workspace}}\golang\pkg\client\version.go - retention-days: 1 - - build-cli-win64: - needs: [ build-win64 ] - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v3 - - name: Attach to workspace - uses: actions/download-artifact@v3 - with: - name: windows-build - - - name: Debug artifacts - shell: bash - run: | - ls -lha - - - name: Debug artifacts - shell: bash + - name: Login to ns1labs conan + shell: pwsh run: | - ls -lha - mv src pktvisor-src - cp -rpf golang/pkg/client/version.go . - ls -lha + $env:CONAN_USER_HOME = "${{github.workspace}}\build\conan_home" + conan user -p ${{ secrets.CONAN_LABS_PASSWORD }} -r ns1labs ${{ secrets.CONAN_LABS_USERNAME }} - - name: Build pktvisor-cli - uses: ./.github/actions/build-go - with: - context: "." 
- goos: windows - file: "./Dockerfile" - - - name: Debug artifacts - shell: bash - run: | - mv ./pktvisor-cli pktvisor-cli.exe - mv ./build/bin/pktvisord.exe ./pktvisord.exe - mv ./build/bin/pktvisor-reader.exe ./pktvisor-reader.exe - ls -lha - - - name: compacting windows binary + - name: Cache conan packages + shell: pwsh run: | - zip pktvisor-win64.zip pktvisor-cli.exe pktvisor-reader.exe pktvisord.exe - ls -lha + $env:CONAN_USER_HOME = "${{github.workspace}}\build\conan_home" + conan upload "*" --all -r ns1labs -c package-amd64: needs: [ unit-tests-linux ] @@ -275,27 +200,15 @@ jobs: outputs: version_number: ${{ env.VERSION }} steps: - - uses: actions/checkout@v3 - - - name: Create Build Environment - run: cmake -E make_directory ${{github.workspace}}/build - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}- + - uses: actions/checkout@v2 - - name: Build pktvisord + push symbol to bugsplat.com + - name: Build pktvisord + push symbol to backtrace.io uses: ./.github/actions/build-cpp with: context: "." 
build_type: "Release" asan: "OFF" - bugsplat_key: ${{secrets.BUGSPLAT_KEY}} - bugsplat_symbol_url: ${{secrets.BUGSPLAT_SYMBOL_URL}} - bugsplat: "true" + symbol_url: ${{secrets.SYMBOL_URL}} file: "./Dockerfile" - name: Build pktvisor-cli @@ -332,8 +245,8 @@ jobs: - name: Login to Docker Hub uses: docker/login-action@v1 with: - username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} - password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Replace token run: | @@ -341,7 +254,7 @@ jobs: - name: Replace escape url run: | - REPLACE=${{ secrets.BUGSPLAT_CP_URL }} + REPLACE=${{ secrets.CRASHPAD_URL }} ESCAPED_REPLACE=$(printf '%s\n' "$REPLACE" | sed -e 's/[\/&]/\\&/g') sed -i -e "s/CP_URL/$ESCAPED_REPLACE/g" docker/run.sh @@ -354,43 +267,29 @@ jobs: - name: Build + push - pktvisor (multi-arch) env: - IMAGE_NAME1: orbcommunity/pktvisor - IMAGE_NAME2: ghcr.io/orb-community/pktvisor + IMAGE_NAME: ns1labs/pktvisor + IMAGE_NAME2: ghcr.io/ns1labs/pktvisor uses: docker/build-push-action@v3 with: builder: ${{ steps.buildx.outputs.name }} context: . 
file: ./docker/Dockerfile.crashhandler platforms: linux/amd64 - tags: ${{ env.IMAGE_NAME1 }}:${{ needs.unit-tests-linux.outputs.version_number }} , ${{ env.IMAGE_NAME1 }}:${{ env.REF_TAG }} , ${{ env.IMAGE_NAME2 }}:amd64-${{needs.unit-tests-linux.outputs.commit_hash}} - outputs: type=docker,dest=/tmp/amd64.tar - - - name: Load image - env: - IMAGE_NAME1: orbcommunity/pktvisor - IMAGE_NAME2: ghcr.io/orb-community/pktvisor - run: | - docker load --input /tmp/amd64.tar - docker image ls -a - docker push ${{ env.IMAGE_NAME2 }}:amd64-${{needs.unit-tests-linux.outputs.commit_hash}} - docker push ${{ env.IMAGE_NAME1 }}:${{ env.REF_TAG }} - docker push ${{ env.IMAGE_NAME1 }}:${{ needs.unit-tests-linux.outputs.version_number }} + push: true + tags: ${{ env.IMAGE_NAME }}:${{ needs.unit-tests-linux.outputs.version_number }} , ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} , ${{ env.IMAGE_NAME2 }}:amd64 build-app-image-x64: needs: [ package-amd64 ] runs-on: ubuntu-latest - #if: github.event_name != 'pull_request' + if: github.event_name != 'pull_request' steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Create Build Environment run: cmake -E make_directory ${{github.workspace}}/build - name: Get Conan - id: conan - uses: turtlebrowser/get-conan@main - with: - version: 1.59.0 + uses: turtlebrowser/get-conan@v1.0 - name: Configure CMake to generate VERSION shell: bash @@ -412,22 +311,29 @@ jobs: run: | echo "REF_TAG=latest-develop" >> $GITHUB_ENV echo "PRERELEASE=true" >> $GITHUB_ENV - echo "DRAFT=false" >> $GITHUB_ENV + echo "DRAFT=false" >> $GITHUB_ENV + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Prepare version file + - name: Build + push - pktvisor-cli + env: + CLI_BINARY: pktvisor-cli + IMAGE_NAME: ns1labs/pktvisor-cli run: | - cp -rpf golang/pkg/client/version.go . 
- - - name: Build pktvisor-cli - uses: ./.github/actions/build-go - with: - context: "." - file: "./Dockerfile" + docker build . --file docker/Dockerfile.pktvisor-cli --tag ${{ env.IMAGE_NAME }}:${{ env.VERSION }} --tag ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} + docker push -a ${{ env.IMAGE_NAME }} + echo "CONT_ID=$(docker create --name ${{ env.CLI_BINARY }}-slim-tmp ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }})" >> $GITHUB_ENV - - name: Debug and Rename artifacts + - name: Extract static pktvisor-cli asset + env: + CLI_BINARY: pktvisor-cli + IMAGE_NAME: ns1labs/pktvisor-cli run: | - ls -lha . - mv pktvisor-cli pktvisor-cli-linux-x86_64-${{ env.VERSION }} + docker cp ${{ env.CONT_ID }}:/${{ env.CLI_BINARY }} ${{github.workspace}}/${{ env.CLI_BINARY }}-linux-x86_64-${{ env.VERSION }} - name: Upload pktvisor-cli artifact env: @@ -436,16 +342,10 @@ jobs: with: name: ${{ env.BINARY_NAME }} path: ${{github.workspace}}/${{ env.BINARY_NAME }} - - - name: Login to Docker Hub - uses: docker/login-action@v1 - with: - username: ${{ secrets.ORB_DOCKERHUB_USERNAME }} - password: ${{ secrets.ORB_DOCKERHUB_TOKEN }} - name: Build + push - pktvisor-prom-write env: - IMAGE_NAME: orbcommunity/pktvisor-prom-write + IMAGE_NAME: ns1labs/pktvisor-prom-write working-directory: ${{github.workspace}}/centralized_collection/prometheus/docker-grafana-agent run: | docker build . 
--file Dockerfile --build-arg PKTVISOR_TAG=${{ env.REF_TAG }} --tag ${{ env.IMAGE_NAME }}:${{ env.VERSION }} --tag ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} @@ -453,7 +353,7 @@ jobs: - name: Generate AppImage env: - IMAGE_NAME: orbcommunity/pktvisor + IMAGE_NAME: ns1labs/pktvisor working-directory: ${{github.workspace}}/appimage run: | DEV_IMAGE="${{ env.IMAGE_NAME }}:${{ env.VERSION }}" DEV_MODE=t make pktvisor-x86_64.AppImage @@ -473,7 +373,7 @@ jobs: uses: peter-evans/repository-dispatch@v2 with: token: ${{ secrets.BOT_TOKEN }} - repository: orb-community/orb + repository: ns1labs/orb event-type: build-agent client-payload: '{"branch_name": "develop"}' @@ -485,13 +385,13 @@ jobs: if: github.event_name != 'pull_request' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Generate runner token id: token run: | sleep 3 - curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/orb-community/pktvisor/actions/runners/registration-token -o token.json + curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/ns1labs/pktvisor/actions/runners/registration-token -o token.json echo "::set-output name=runner::$(cat token.json | jq .token --raw-output )" cat token.json @@ -569,7 +469,7 @@ jobs: if: github.event_name != 'pull_request' steps: - uses: AutoModality/action-clean@v1 - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Locking pktvisor integration test state run: | @@ -647,7 +547,6 @@ jobs: target: "/usr/share/nginx/html/pktvisor/develop/${{ needs.integration-tests.outputs.commit_hash }}/pktvisor/python-test" remove-github-runner-amd64: - continue-on-error: true needs: [ spinup-amd64-runner , upload-qa-artifact ] runs-on: ubuntu-latest if: github.event_name != 'pull_request' @@ -681,7 +580,7 @@ jobs: if: github.event_name != 'pull_request' steps: - name: 
Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Attach to workspace uses: actions/download-artifact@v2 @@ -726,7 +625,7 @@ jobs: if: github.event_name != 'pull_request' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Get branch name shell: bash @@ -735,7 +634,7 @@ jobs: - name: Generate runner token id: token run: | - curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/orb-community/pktvisor/actions/runners/registration-token -o token.json + curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/ns1labs/pktvisor/actions/runners/registration-token -o token.json echo "::set-output name=runner::$(cat token.json | jq .token --raw-output )" cat token.json @@ -809,32 +708,16 @@ jobs: runs-on: [self-hosted, linux, ARM64] if: github.event_name != 'pull_request' steps: - - name: cleanup #https://github.com/actions/checkout/issues/211 - run: | - sudo chown -R $USER:$USER $GITHUB_WORKSPACE - - name: Checkout - uses: actions/checkout@v3 - - - name: Create Build Environment - run: cmake -E make_directory ${{github.workspace}}/build - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-arm64-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}-arm64- - - - name: Build pktvisord + push symbol to bugsplat.com + uses: actions/checkout@v2 + + - name: Build pktvisord + push symbol to backtrace.io uses: ./.github/actions/build-cpp with: context: "." 
build_type: "Release" asan: "OFF" - bugsplat_key: ${{secrets.BUGSPLAT_KEY}} - bugsplat_symbol_url: ${{secrets.BUGSPLAT_SYMBOL_URL}} - bugsplat: "false" + symbol_url: ${{secrets.SYMBOL_URL}} file: "./Dockerfile" - name: Build pktvisor-cli @@ -873,33 +756,25 @@ jobs: - name: Replace escape url run: | - REPLACE=${{ secrets.BUGSPLAT_CP_URL }} + REPLACE=${{ secrets.CRASHPAD_URL }} ESCAPED_REPLACE=$(printf '%s\n' "$REPLACE" | sed -e 's/[\/&]/\\&/g') sed -i -e "s/CP_URL/$ESCAPED_REPLACE/g" docker/run.sh - name: Build + push - pktvisor (multi-arch) - id: docker_build env: - IMAGE_NAME: ghcr.io/orb-community/pktvisor + IMAGE_NAME: ghcr.io/ns1labs/pktvisor uses: docker/build-push-action@v3 with: builder: ${{ steps.buildx.outputs.name }} context: . file: ./docker/Dockerfile.crashhandler platforms: linux/arm64 - tags: ${{ env.IMAGE_NAME }}:arm64-${{needs.unit-tests-linux.outputs.commit_hash}} - outputs: type=docker,dest=/tmp/arm64.tar - - - name: Load image - env: - IMAGE_NAME: ghcr.io/orb-community/pktvisor - run: | - docker load --input /tmp/arm64.tar - docker image ls -a - docker push ${{ env.IMAGE_NAME }}:arm64-${{needs.unit-tests-linux.outputs.commit_hash}} + push: true + tags: ${{ env.IMAGE_NAME }}:arm64 + build-args: | + ghtoken=${{ secrets.GITHUB_TOKEN }} remove-github-runner-arm64: - continue-on-error: true needs: [ spinup-arm64-runner, package-arm64 ] runs-on: ubuntu-latest if: github.event_name != 'pull_request' @@ -933,7 +808,7 @@ jobs: if: github.event_name != 'pull_request' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Attach to workspace uses: actions/download-artifact@v2 @@ -984,13 +859,13 @@ jobs: - name: Create docker manifest run: | - docker manifest create ghcr.io/orb-community/pktvisor:latest-develop \ - ghcr.io/orb-community/pktvisor:amd64-${{needs.unit-tests-linux.outputs.commit_hash}} \ - ghcr.io/orb-community/pktvisor:arm64-${{needs.unit-tests-linux.outputs.commit_hash}} + docker manifest create 
ghcr.io/ns1labs/pktvisor:latest-develop \ + ghcr.io/ns1labs/pktvisor:amd64 \ + ghcr.io/ns1labs/pktvisor:arm64 - name: Push manifest to ghcr.io run: | - docker manifest push ghcr.io/orb-community/pktvisor:latest-develop + docker manifest push ghcr.io/ns1labs/pktvisor:latest-develop - name: Install skopeo run: | @@ -1009,38 +884,11 @@ jobs: run: | sudo skopeo copy \ --all \ - docker://ghcr.io/orb-community/pktvisor:latest-develop \ - docker://docker.io/orbcommunity/pktvisor:latest-develop - sudo skopeo copy \ - --all \ - docker://ghcr.io/orb-community/pktvisor:latest-develop \ - docker://docker.io/orbcommunity/pktvisor:develop + docker://ghcr.io/ns1labs/pktvisor:latest-develop \ + docker://docker.io/ns1labs/pktvisor:latest-develop sudo skopeo copy \ --all \ - docker://ghcr.io/orb-community/pktvisor:latest-develop \ - docker://docker.io/orbcommunity/pktvisor:${{ needs.unit-tests-linux.outputs.version_number }} - - - - name: Delete arm64 images from ghcr.io - uses: bots-house/ghcr-delete-image-action@v1.0.0 - with: - owner: orb-community - name: pktvisor - token: ${{ secrets.GITHUB_TOKEN }} - tag: arm64-${{needs.unit-tests-linux.outputs.commit_hash}} - - - name: Delete amd64 images from ghcr.io - uses: bots-house/ghcr-delete-image-action@v1.0.0 - with: - owner: orb-community - name: pktvisor - token: ${{ secrets.GITHUB_TOKEN }} - tag: amd64-${{needs.unit-tests-linux.outputs.commit_hash}} - - - name: Delete latest-develop images from ghcr.io - uses: bots-house/ghcr-delete-image-action@v1.0.0 - with: - owner: orb-community - name: pktvisor - token: ${{ secrets.GITHUB_TOKEN }} - tag: latest-develop + docker://ghcr.io/ns1labs/pktvisor:latest-develop \ + docker://docker.io/ns1labs/pktvisor:${{ needs.unit-tests-linux.outputs.version_number }} + + diff --git a/.github/workflows/build-release.yml b/.github/workflows/build-release.yml index 828f41a69..9c38e6fec 100644 --- a/.github/workflows/build-release.yml +++ b/.github/workflows/build-release.yml @@ -23,23 +23,13 @@ jobs: 
os: [ ubuntu-latest, macos-11 ] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 - + - uses: actions/checkout@v2 + - name: Create Build Environment run: cmake -E make_directory ${{github.workspace}}/build - name: Get Conan - id: conan - uses: turtlebrowser/get-conan@main - with: - version: 1.59.0 - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}- + uses: turtlebrowser/get-conan@v1.0 - name: linux package install if: matrix.os == 'ubuntu-latest' @@ -62,6 +52,12 @@ jobs: shell: bash run: ctest -C $BUILD_TYPE + - name: Login to ns1labs conan + run: CONAN_USER_HOME=${{github.workspace}}/build/conan_home conan user -p ${{ secrets.CONAN_LABS_PASSWORD }} -r ns1labs ${{ secrets.CONAN_LABS_USERNAME }} + + - name: Cache conan packages + run: CONAN_USER_HOME=${{github.workspace}}/build/conan_home conan upload "*" --all -r ns1labs -c + prebuild-package: needs: [ unit-tests ] runs-on: ubuntu-latest @@ -70,7 +66,7 @@ jobs: branch_name: ${{ steps.branch.outputs.name }} if: github.event_name != 'pull_request' steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Remove suffix from Cmakelists.txt file run: | @@ -81,10 +77,7 @@ jobs: run: cmake -E make_directory ${{github.workspace}}/build - name: Get Conan - id: conan - uses: turtlebrowser/get-conan@main - with: - version: 1.59.0 + uses: turtlebrowser/get-conan@v1.0 - name: Configure CMake to generate VERSION shell: bash @@ -118,7 +111,7 @@ jobs: if: github.event_name != 'pull_request' steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v2 with: fetch-depth: 0 @@ -131,10 +124,7 @@ jobs: run: cmake -E make_directory ${{github.workspace}}/build - name: Get Conan - id: conan - uses: turtlebrowser/get-conan@main - with: - version: 1.59.0 + uses: turtlebrowser/get-conan@v1.0 - name: Configure 
CMake to generate VERSION shell: bash @@ -156,7 +146,7 @@ jobs: - name: Post pktvisor changelog in slack channel run: | curl -d "text=:mega: *Pktvisor release reporter* :mega: - $(cat changelog.md)" -d "channel=C041B9204CF" -H "Authorization: Bearer ${{secrets.SLACK_APP_TOKEN}}" -X POST https://slack.com/api/chat.postMessage + $(cat changelog.md)" -d "channel=C02SEF78ZPT" -H "Authorization: Bearer ${{secrets.SLACK_APP_TOKEN}}" -X POST https://slack.com/api/chat.postMessage - name: Create release id: create_release @@ -170,6 +160,7 @@ jobs: draft: false prerelease: false + spinup-arm64-runner: needs: [ prebuild-package ] runs-on: ubuntu-latest @@ -179,12 +170,12 @@ jobs: if: github.event_name != 'pull_request' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Generate runner token id: token run: | - curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/orb-community/pktvisor/actions/runners/registration-token -o token.json + curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/ns1labs/pktvisor/actions/runners/registration-token -o token.json echo "::set-output name=runner::$(cat token.json | jq .token --raw-output )" cat token.json @@ -265,12 +256,12 @@ jobs: if: github.event_name != 'pull_request' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Generate runner token id: token run: | - curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/orb-community/pktvisor/actions/runners/registration-token -o token.json + curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/ns1labs/pktvisor/actions/runners/registration-token -o token.json echo "::set-output 
name=runner::$(cat token.json | jq .token --raw-output )" cat token.json @@ -341,138 +332,6 @@ jobs: path: | .github/hosted-runner/amd64/terraform.tfstate retention-days: 1 - - build-win64: - # The CMake configure and build commands are platform agnostic and should work equally - # well on Windows or Mac. You can convert this to a matrix build if you need - # cross-platform coverage. - # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix - - runs-on: windows-2019 - outputs: - version_number: ${{ env.VERSION }} - steps: - - uses: actions/checkout@v3 - - - name: Create Build Environment - # Some projects don't allow in-source building, so create a separate build directory - # We'll use this as our working directory for all subsequent commands - run: cmake -E make_directory ${{github.workspace}}\build - - - name: Get Conan - # You may pin to the exact commit or the version. - # uses: turtlebrowser/get-conan@4dc7e6dd45c8b1e02e909979d7cfc5ebba6ddbe2 - id: conan - uses: turtlebrowser/get-conan@main - with: - version: 1.59.0 - - - name: Remove libpcap from conanfile - shell: bash - run: | - sed -i -e "s/libpcap.*//g" "${{github.workspace}}\conanfile.txt" - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}- - - - name: Configure CMake - # Use a bash shell so we can use the same syntax for environment variable - # access regardless of the host operating system - shell: bash - working-directory: ${{github.workspace}}\build - # Note the current convention is to use the -S and -B options here to specify source - # and build directories, but this is only available with CMake 3.13 and higher. 
- # The CMake binaries on the Github Actions machines are (as of this writing) 3.12 - run: PKG_CONFIG_PATH=${{github.workspace}}\local\lib\pkgconfig cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE - - - name: Get VERSION - shell: pwsh - id: build - run: | - $text = Get-Content ${{github.workspace}}\build\VERSION -Raw - echo "VERSION=$text" >> $env:GITHUB_ENV - - - name: Build - working-directory: ${{github.workspace}}\build - shell: bash - # Execute the build. You can specify a specific target with "--target " - run: cmake --build . --config $BUILD_TYPE -- -m - - # TODO: Fix unit tests for windows platform - #- name: Test - # working-directory: ${{github.workspace}}/build - # shell: bash - # # Execute tests defined by the CMake configuration. - # # See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail - # run: ctest -C $BUILD_TYPE - - - name: Debug artifacts - shell: pwsh - run: | - Get-ChildItem -Force ${{github.workspace}} - Get-ChildItem -Force ${{github.workspace}}\build - Get-ChildItem -Force ${{github.workspace}}\build\bin - - - name: Persist to workspace - uses: actions/upload-artifact@v2 - with: - name: windows-build - path: | - ${{github.workspace}}\build\bin\pktvisor-reader.exe - ${{github.workspace}}\build\bin\pktvisord.exe - ${{github.workspace}}\golang\pkg\client\version.go - retention-days: 1 - - build-cli-win64: - needs: [ build-win64 ] - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Attach to workspace - uses: actions/download-artifact@v3 - with: - name: windows-build - - - name: Debug artifacts - shell: bash - run: | - ls -lha - - - name: Debug artifacts - shell: bash - run: | - mv src pktvisor-src - cp -rpf golang/pkg/client/version.go . - - - name: Build pktvisor-cli - uses: ./.github/actions/build-go - with: - context: "." 
- goos: windows - file: "./Dockerfile" - - - name: Debug artifacts - shell: bash - run: | - mv ./pktvisor-cli pktvisor-cli.exe - mv ./build/bin/pktvisord.exe ./pktvisord.exe - mv ./build/bin/pktvisor-reader.exe ./pktvisor-reader.exe - - - name: compacting windows binary - run: | - zip pktvisor-win64.zip pktvisor-cli.exe pktvisor-reader.exe pktvisord.exe - - - name: Upload win64 binary to latest release - shell: bash - run: | - chmod a+x ./docker/upload-gh-asset.sh - ./docker/upload-gh-asset.sh github_api_token=${{ secrets.BOT_TOKEN }} owner=orb-community repo=pktvisor tag=LATEST filename=./pktvisor-win64.zip package-arm64: needs: [ spinup-arm64-runner ] @@ -483,26 +342,14 @@ jobs: uses: actions/download-artifact@v2 with: name: workspace - - - name: Create Build Environment - run: cmake -E make_directory ${{github.workspace}}/build - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-arm64-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}-arm64- - + - name: Build pktvisord + push symbol to backtrace.io uses: ./.github/actions/build-cpp with: context: "." build_type: "Release" asan: "OFF" - bugsplat_key: ${{secrets.BUGSPLAT_KEY}} - bugsplat_symbol_url: ${{secrets.BUGSPLAT_SYMBOL_URL}} - bugsplat: "false" + symbol_url: ${{secrets.SYMBOL_URL}} file: "./Dockerfile" - name: Build pktvisor-cli @@ -510,10 +357,10 @@ jobs: with: context: "." file: "./Dockerfile" - + - name: Debug artifacts run: ls -lha . 
- + - name: compacting arm64 binary run: | zip pktvisor-arm64.zip pktvisor-cli pktvisor-reader pktvisord @@ -521,7 +368,7 @@ jobs: - name: Upload arm64 binary to latest release run: | chmod a+x ./docker/upload-gh-asset.sh - ./docker/upload-gh-asset.sh github_api_token=${{ secrets.BOT_TOKEN }} owner=orb-community repo=pktvisor tag=LATEST filename=./pktvisor-arm64.zip + ./docker/upload-gh-asset.sh github_api_token=${{ secrets.BOT_TOKEN }} owner=ns1labs repo=pktvisor tag=LATEST filename=./pktvisor-arm64.zip - name: Get branch name shell: bash @@ -567,30 +414,21 @@ jobs: - name: Replace escape url run: | - REPLACE=${{ secrets.BUGSPLAT_CP_URL }} + REPLACE=${{ secrets.CRASHPAD_URL }} ESCAPED_REPLACE=$(printf '%s\n' "$REPLACE" | sed -e 's/[\/&]/\\&/g') sed -i -e "s/CP_URL/$ESCAPED_REPLACE/g" docker/run.sh - name: Build + push - pktvisor (multi-arch) - id: docker_build env: - IMAGE_NAME: ghcr.io/orb-community/pktvisor + IMAGE_NAME: ghcr.io/ns1labs/pktvisor uses: docker/build-push-action@v3 with: builder: ${{ steps.buildx.outputs.name }} context: . 
file: ./docker/Dockerfile.crashhandler platforms: linux/arm64 - tags: ${{ env.IMAGE_NAME }}:release-arm64 - outputs: type=docker,dest=/tmp/arm64.tar - - - name: Load image - env: - IMAGE_NAME: ghcr.io/orb-community/pktvisor - run: | - docker load --input /tmp/arm64.tar - docker image ls -a - docker push ${{ env.IMAGE_NAME }}:release-arm64 + push: true + tags: ${{ env.IMAGE_NAME }}:arm64 package-armv7: needs: [ spinup-amd64-runner ] @@ -636,7 +474,7 @@ jobs: sed -i -e "s/CP_TOKEN/${{ secrets.CRASHPAD_TOKEN }}/g" docker/run.sh - name: Replace escape url run: | - REPLACE=${{ secrets.BUGSPLAT_CP_URL }} + REPLACE=${{ secrets.CRASHPAD_URL }} ESCAPED_REPLACE=$(printf '%s\n' "$REPLACE" | sed -e 's/[\/&]/\\&/g') sed -i -e "s/CP_URL/$ESCAPED_REPLACE/g" docker/run.sh @@ -649,55 +487,33 @@ jobs: - name: Build + push - pktvisor (multi-arch) env: - IMAGE_NAME: ghcr.io/orb-community/pktvisor + IMAGE_NAME: ghcr.io/ns1labs/pktvisor uses: docker/build-push-action@v3 with: builder: ${{ steps.buildx.outputs.name }} context: . 
file: ./docker/Dockerfile platforms: linux/arm/v7 - tags: ${{ env.IMAGE_NAME }}:release-armv7 - outputs: type=docker,dest=/tmp/armv7.tar - - - name: Load image - env: - IMAGE_NAME: ghcr.io/orb-community/pktvisor - run: | - docker load --input /tmp/armv7.tar - docker image ls -a - docker push ${{ env.IMAGE_NAME }}:release-armv7 + push: true + tags: ${{ env.IMAGE_NAME }}:armv7 package-amd64: needs: [ prebuild-package ] runs-on: ubuntu-latest - outputs: - release_version: ${{ env.VERSION }} if: github.event_name != 'pull_request' steps: - name: Attach to workspace uses: actions/download-artifact@v2 with: name: workspace - - - name: Create Build Environment - run: cmake -E make_directory ${{github.workspace}}/build - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}- - + - name: Build pktvisord + push symbol to backtrace.io uses: ./.github/actions/build-cpp with: context: "." 
build_type: "Release" asan: "OFF" - bugsplat_key: ${{secrets.BUGSPLAT_KEY}} - bugsplat_symbol_url: ${{secrets.BUGSPLAT_SYMBOL_URL}} - bugsplat: "true" + symbol_url: ${{secrets.SYMBOL_URL}} file: "./Dockerfile" - name: Build pktvisor-cli @@ -716,7 +532,7 @@ jobs: - name: Upload amd64 binary to latest release run: | chmod a+x ./docker/upload-gh-asset.sh - ./docker/upload-gh-asset.sh github_api_token=${{ secrets.BOT_TOKEN }} owner=orb-community repo=pktvisor tag=LATEST filename=./pktvisor-amd64.zip + ./docker/upload-gh-asset.sh github_api_token=${{ secrets.BOT_TOKEN }} owner=ns1labs repo=pktvisor tag=LATEST filename=./pktvisor-amd64.zip - name: Get branch name shell: bash @@ -762,32 +578,23 @@ jobs: - name: Replace escape url run: | - REPLACE=${{ secrets.BUGSPLAT_CP_URL }} + REPLACE=${{ secrets.CRASHPAD_URL }} ESCAPED_REPLACE=$(printf '%s\n' "$REPLACE" | sed -e 's/[\/&]/\\&/g') sed -i -e "s/CP_URL/$ESCAPED_REPLACE/g" docker/run.sh - name: Build + push - pktvisor (multi-arch) env: - IMAGE_NAME: ghcr.io/orb-community/pktvisor + IMAGE_NAME: ghcr.io/ns1labs/pktvisor uses: docker/build-push-action@v3 with: builder: ${{ steps.buildx.outputs.name }} context: . 
file: ./docker/Dockerfile.crashhandler platforms: linux/amd64 - tags: ${{ env.IMAGE_NAME }}:release-amd64 - outputs: type=docker,dest=/tmp/amd64.tar - - - name: Load image - env: - IMAGE_NAME: ghcr.io/orb-community/pktvisor - run: | - docker load --input /tmp/amd64.tar - docker image ls -a - docker push ${{ env.IMAGE_NAME }}:release-amd64 + push: true + tags: ${{ env.IMAGE_NAME }}:amd64 remove-github-runner-arm64: - continue-on-error: true needs: [ package-arm64 ] runs-on: ubuntu-latest if: github.event_name != 'pull_request' @@ -806,7 +613,7 @@ jobs: - name: Generate remove runner token id: remove-token-arm64 run: | - curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/orb-community/pktvisor/actions/runners/remove-token -o token.json + curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/ns1labs/pktvisor/actions/runners/remove-token -o token.json echo "::set-output name=remove-runner::$(cat token.json | jq .token --raw-output )" cat token.json echo "TOKEN_RUNNER_ARM64=$(cat token.json | jq .token --raw-output )" >> $GITHUB_ENV @@ -829,7 +636,7 @@ jobs: if: github.event_name != 'pull_request' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Attach to workspace uses: actions/download-artifact@v2 @@ -867,7 +674,6 @@ jobs: terraform destroy -auto-approve remove-github-runner-amd64: - continue-on-error: true needs: [ package-armv7 ] runs-on: ubuntu-latest if: github.event_name != 'pull_request' @@ -886,7 +692,7 @@ jobs: - name: Generate remove runner token id: remove-token-amd64 run: | - curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/orb-community/pktvisor/actions/runners/remove-token -o token.json + curl -X POST -H "authorization: Bearer ${{ secrets.BOT_TOKEN }}" -H 
"Accept: application/vnd.github.v3+json" https://api.github.com/repos/ns1labs/pktvisor/actions/runners/remove-token -o token.json echo "::set-output name=remove-runner::$(cat token.json | jq .token --raw-output )" cat token.json echo "TOKEN_RUNNER_AMD64=$(cat token.json | jq .token --raw-output )" >> $GITHUB_ENV @@ -909,7 +715,7 @@ jobs: if: github.event_name != 'pull_request' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v2 - name: Attach to workspace uses: actions/download-artifact@v2 @@ -960,14 +766,14 @@ jobs: - name: Create docker manifest run: | - docker manifest create ghcr.io/orb-community/pktvisor:latest \ - ghcr.io/orb-community/pktvisor:release-amd64 \ - ghcr.io/orb-community/pktvisor:release-armv7 \ - ghcr.io/orb-community/pktvisor:release-arm64 + docker manifest create ghcr.io/ns1labs/pktvisor:latest \ + ghcr.io/ns1labs/pktvisor:amd64 \ + ghcr.io/ns1labs/pktvisor:armv7 \ + ghcr.io/ns1labs/pktvisor:arm64 - name: Push manifest to ghcr.io run: | - docker manifest push ghcr.io/orb-community/pktvisor:latest + docker manifest push ghcr.io/ns1labs/pktvisor:latest - name: Install skopeo run: | @@ -980,18 +786,11 @@ jobs: - name: Login skopeo to dockerhub run: | - sudo skopeo login -u ${{ secrets.ORB_DOCKERHUB_USERNAME }} -p ${{ secrets.ORB_DOCKERHUB_TOKEN }} docker.io + sudo skopeo login -u ${{ secrets.DOCKERHUB_USERNAME }} -p ${{ secrets.DOCKERHUB_TOKEN }} docker.io - - name: Copy latest from ghcr.io to docker.io - run: | - sudo skopeo copy \ - --all \ - docker://ghcr.io/orb-community/pktvisor:latest \ - docker://docker.io/orbcommunity/pktvisor:latest - - - name: Copy release version from ghcr.io to docker.io + - name: Copy from ghcr.io to docker.io run: | sudo skopeo copy \ --all \ - docker://ghcr.io/orb-community/pktvisor:latest \ - docker://docker.io/orbcommunity/pktvisor:${{ needs.package-amd64.outputs.release_version }} + docker://ghcr.io/ns1labs/pktvisor:latest \ + docker://docker.io/ns1labs/pktvisor:latest diff --git 
a/.github/workflows/clean-runners.yml b/.github/workflows/clean-runners.yml index 0fff3a5c4..d84749db1 100644 --- a/.github/workflows/clean-runners.yml +++ b/.github/workflows/clean-runners.yml @@ -14,23 +14,4 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.BOT_TOKEN }} run: | - gh api -H "Accept: application/vnd.github.v3+json" /repos/orb-community/pktvisor/actions/runners -q '.runners[] | {id,status,busy} | select((.busy == false) and (.status == "offline")) | {id} | .[]' --paginate | xargs -I {} gh api --method DELETE -H "Accept: application/vnd.github.v3+json" /repos/orb-community/pktvisor/actions/runners/{} - -# - name: Clear cache -# uses: actions/github-script@v6 -# with: -# script: | -# console.log("About to clear") -# const caches = await github.rest.actions.getActionsCacheList({ -# owner: context.repo.owner, -# repo: context.repo.repo, -# }) -# for (const cache of caches.data.actions_caches) { -# console.log(cache) -# github.rest.actions.deleteActionsCacheById({ -# owner: context.repo.owner, -# repo: context.repo.repo, -# cache_id: cache.id, -# }) -# } -# console.log("Clear completed") + gh api -H "Accept: application/vnd.github.v3+json" /repos/ns1labs/pktvisor/actions/runners -q '.runners[] | {id,status,busy} | select((.busy == false) and (.status == "offline")) | {id} | .[]' --paginate | xargs -I {} gh api --method DELETE -H "Accept: application/vnd.github.v3+json" /repos/ns1labs/pktvisor/actions/runners/{} diff --git a/.github/workflows/code-ql.yml b/.github/workflows/code-ql.yml index 460cefa3c..a76216146 100644 --- a/.github/workflows/code-ql.yml +++ b/.github/workflows/code-ql.yml @@ -2,7 +2,11 @@ name: "CodeQL" on: - + #push: + # branches: [ "develop" ] + #pull_request: + # The branches below must be a subset of the branches above + # branches: [ "develop" ] schedule: #run 5 minutes after midnight daily - cron: '5 0 * * *' @@ -60,10 +64,11 @@ jobs: - run: | # Run Build - set up dependencies, env vars, compile, and make test #install conan - pip install 
--no-cache-dir 'conan==1.59.0' --force-reinstall + pip install --no-cache-dir conan # create conan config CONAN_V2_MODE=1 conan config init conan config set general.revisions_enabled=1 + conan remote add ns1labs https://ns1labs.jfrog.io/artifactory/api/conan/ns1labs-conan # add custom compiler settings for libc python3 -c 'import yaml; p = "${{env.CONAN_USER_HOME}}/.conan/settings.yml"; d = yaml.safe_load(open(p)); d["compiler"]["gcc"]["libc"] = ["None", "glibc", "musl"]; yaml.safe_dump(d, open(p, "w"))' # Create Conan host profile @@ -87,7 +92,7 @@ jobs: LDFLAGS=${{matrix.ldflags}} EOF # clone the repository, not sure if this needs to be done - git clone https://github.com/orb-community/pktvisor.git + git clone https://github.com/ns1labs/pktvisor.git cd pktvisor mkdir build && cd build # configure and handle dependencies diff --git a/.github/workflows/build_cross.yml b/.github/workflows/cross.yml similarity index 83% rename from .github/workflows/build_cross.yml rename to .github/workflows/cross.yml index 195a2454f..4d3ece9cd 100644 --- a/.github/workflows/build_cross.yml +++ b/.github/workflows/cross.yml @@ -6,10 +6,11 @@ on: pull_request: branches: - develop - + #- release push: branches: - develop + #- release jobs: pkvisor: @@ -46,7 +47,6 @@ jobs: with: path: bin/sccache key: sccache-v0.2.15 - - name: Install sccache if: steps.cache-sccache.outputs.cache-hit != 'true' run: | @@ -61,7 +61,6 @@ jobs: with: path: toolchain key: toolchain-test-${{matrix.toolchain}} - - name: Install compiler toolchain if: steps.cache-toolchain.outputs.cache-hit != 'true' run: | @@ -69,16 +68,15 @@ jobs: curl -L "${{matrix.toolchain}}" | tar -C toolchain -xz --strip-components=1 - name: Install Conan - run: pip install --no-cache-dir 'conan==1.59.0' --force-reinstall - + run: pip install --no-cache-dir conan - name: Create Conan configuration run: | # init config CONAN_V2_MODE=1 conan config init conan config set general.revisions_enabled=1 + conan remote add ns1labs 
https://ns1labs.jfrog.io/artifactory/api/conan/ns1labs-conan # add custom compiler settings for libc python3 -c 'import yaml; p = "${{env.CONAN_USER_HOME}}/.conan/settings.yml"; d = yaml.safe_load(open(p)); d["compiler"]["gcc"]["libc"] = ["None", "glibc", "musl"]; yaml.safe_dump(d, open(p, "w"))' - - name: Create Conan host profile run: | cat > "${{env.CONAN_USER_HOME}}/.conan/profiles/host" << "EOF" @@ -109,20 +107,17 @@ jobs: sccache-${{matrix.arch}}-${{github.base_ref||github.event.repository.default_branch}}- - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v2 with: path: src - - name: Remove crashpad from conanfile run: | sed -i -e "s/crashpad.*//g" ${{github.workspace}}/src/conanfile.txt - - name: Install dependencies run: | mkdir build cd build conan install -pr:b default -pr:h host -g virtualenv --build=missing "${{github.workspace}}/src" - - name: Configure run: | cd build @@ -137,7 +132,6 @@ jobs: -DProtobuf_PROTOC_EXECUTABLE=$(command -v protoc) \ -DCORRADE_RC_PROGRAM=$(command -v corrade-rc) \ -DCMAKE_CXX_STANDARD_LIBRARIES=-latomic - - name: Build run: | cd build @@ -172,45 +166,31 @@ jobs: - os: macos arch: armv7lh steps: - - name: Checkout sources - uses: actions/checkout@v3 - - - name: Configure CMake to generate VERSION - shell: bash - run: VERSION_ONLY=1 cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release - - - name: Rename folder and copy version - shell: bash - run: | - mv src pktvisor-src - cp -rpf golang/pkg/client/version.go . - - - name: Build pktvisor-cli macos - if: ${{matrix.os}} == macos - uses: ./.github/actions/build-go - with: - context: "." - file: "./Dockerfile" - goos: "darwin" - - - name: Build pktvisor-cli linux armv7lh - if: ${{matrix.arch}} == armv7lh - uses: ./.github/actions/build-go + - name: Setup Go + uses: actions/setup-go@v2 with: - context: "." 
- file: "./Dockerfile" - goos: "linux" - goarch: "arm" - - - name: Build pktvisor-cli linux aarch64 - if: ${{matrix.arch}} == aarch64 - uses: ./.github/actions/build-go + go-version: 1.17 + - name: Checkout sources + uses: actions/checkout@v2 with: - context: "." - file: "./Dockerfile" - goos: "linux" - goarch: "arm64" - + path: src + - name: Configure + run: | + VERSION_ONLY=1 cmake src + - name: Build + run: | + if [ "${{matrix.os}}" = macos ]; then + export GOOS=darwin + fi + + if [ "${{matrix.arch}}" = armv7lh ]; then + export GOARCH=arm + elif [ "${{matrix.arch}}" = aarch64 ]; then + export GOARCH=arm64 + fi + + cd src/golang + go build -o ${{github.workspace}}/pktvisor-cli ./cmd/pktvisor-cli - name: Upload pktvisor-cli uses: actions/upload-artifact@v2 with: diff --git a/.github/workflows/build_debug.yml b/.github/workflows/debug_build.yml similarity index 84% rename from .github/workflows/build_debug.yml rename to .github/workflows/debug_build.yml index e8320436e..e50013244 100644 --- a/.github/workflows/build_debug.yml +++ b/.github/workflows/debug_build.yml @@ -20,7 +20,7 @@ jobs: outputs: version_number: ${{ steps.build.outputs.version }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: Create Build Environment # Some projects don't allow in-source building, so create a separate build directory @@ -30,17 +30,7 @@ jobs: - name: Get Conan # You may pin to the exact commit or the version. 
# uses: turtlebrowser/get-conan@4dc7e6dd45c8b1e02e909979d7cfc5ebba6ddbe2 - id: conan - uses: turtlebrowser/get-conan@main - with: - version: 1.59.0 - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}- + uses: turtlebrowser/get-conan@v1.0 - name: linux package install run: | @@ -88,20 +78,16 @@ jobs: name: pktvisor verbose: true + - name: Login to ns1labs conan + run: CONAN_USER_HOME=${{github.workspace}}/build/conan_home conan user -p ${{ secrets.CONAN_LABS_PASSWORD }} -r ns1labs ${{ secrets.CONAN_LABS_USERNAME }} + + - name: Cache conan packages + run: CONAN_USER_HOME=${{github.workspace}}/build/conan_home conan upload "*" --all -r ns1labs -c + build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - - name: Create Build Environment - run: cmake -E make_directory ${{github.workspace}}/build - - - name: Setup Conan Cache - uses: actions/cache@v3 - with: - path: ${{github.workspace}}/build/conan_home/ - key: conan-${{ runner.os }}-${{ hashFiles('conanfile.txt', '*/conanfile.txt') }} - restore-keys: conan-${{ runner.os }}- + - uses: actions/checkout@v2 - name: Build pktvisord + push symbol to backtrace.io uses: ./.github/actions/build-cpp @@ -110,9 +96,6 @@ jobs: build_type: "Debug" asan: "ON" symbol_url: ${{secrets.SYMBOL_URL}} - bugsplat_key: ${{secrets.BUGSPLAT_KEY}} - bugsplat_symbol_url: ${{secrets.BUGSPLAT_SYMBOL_URL}} - bugsplat: "true" file: "./Dockerfile" - name: Build pktvisor-cli @@ -178,7 +161,7 @@ jobs: - name: Replace escape url run: | - REPLACE=${{ secrets.BUGSPLAT_CP_URL }} + REPLACE=${{ secrets.CRASHPAD_URL }} ESCAPED_REPLACE=$(printf '%s\n' "$REPLACE" | sed -e 's/[\/&]/\\&/g') sed -i -e "s/CP_URL/$ESCAPED_REPLACE/g" docker/run.sh @@ -190,8 +173,7 @@ jobs: - name: Build, push debug image + crashhandler env: - IMAGE_NAME: orbcommunity/pktvisor + IMAGE_NAME: 
ns1labs/pktvisor run: | docker build . --file docker/Dockerfile.crashhandler --tag ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} docker push -a ${{ env.IMAGE_NAME }} - diff --git a/.github/workflows/static_build.yml b/.github/workflows/static_build.yml new file mode 100644 index 000000000..9b5d64bb9 --- /dev/null +++ b/.github/workflows/static_build.yml @@ -0,0 +1,129 @@ +name: Static Build + +on: + workflow_dispatch: + pull_request: + branches: + - develop + - release + push: + branches: + - develop + - release + +env: + # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) + BUILD_TYPE: Release + CTEST_OUTPUT_ON_FAILURE: 1 + +jobs: + build: + # The CMake configure and build commands are platform agnostic and should work equally + # well on Windows or Mac. You can convert this to a matrix build if you need + # cross-platform coverage. + # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix + strategy: + matrix: + os: [ ubuntu-latest ] + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v2 + + - name: Build and test static base + run: | + docker build -f docker/Dockerfile.static-base -t ns1labs/static-base . 
+ + - name: Get branch name + shell: bash + run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | tr / -)" >> $GITHUB_ENV + + - name: Debug branch name + run: echo ${{ env.BRANCH_NAME }} + + - name: Get VERSION + run: | + echo "VERSION=`docker run --rm -a stdout --entrypoint cat ns1labs/static-base VERSION`" >> $GITHUB_ENV + + - name: Debug version + run: echo ${{ env.VERSION }} + + - name: Generate ref tag (master) + if: github.event_name != 'pull_request' && env.BRANCH_NAME == 'master' + run: | + echo "REF_TAG=latest" >> $GITHUB_ENV + + - name: Generate ref tag (develop) + if: github.event_name != 'pull_request' && env.BRANCH_NAME == 'develop' + run: | + echo "REF_TAG=latest-develop" >> $GITHUB_ENV + + - name: Generate ref tag (release candidate) + if: github.event_name != 'pull_request' && env.BRANCH_NAME == 'release' + run: | + echo "REF_TAG=latest-rc" >> $GITHUB_ENV + + - name: Debug ref tag + if: github.event_name != 'pull_request' + run: echo ${{ env.REF_TAG }} + + - name: Login to Docker Hub + if: github.event_name != 'pull_request' + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push static pktvisord container + if: github.event_name != 'pull_request' + env: + BASE_BINARY: pktvisord + IMAGE_NAME: ns1labs/pktvisord + run: | + docker build -f docker/Dockerfile.${{ env.BASE_BINARY }}-static -t ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} -t ${{ env.IMAGE_NAME }}:${{ env.VERSION }} . 
+ docker push -a ${{ env.IMAGE_NAME }} + echo "CONT_ID=$(docker create --name ${{ env.BASE_BINARY }}-slim-tmp ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }})" >> $GITHUB_ENV + + - name: Extract static pktvisord asset + if: github.event_name != 'pull_request' + env: + BASE_BINARY: pktvisord + IMAGE_NAME: ns1labs/pktvisord + run: | + docker cp ${{ env.CONT_ID }}:/${{ env.BASE_BINARY }} ${{github.workspace}}/${{ env.BASE_BINARY }}-linux-x86_64-${{ env.VERSION }} + + - name: Upload pktvisord artifact + if: github.event_name != 'pull_request' + env: + BINARY_NAME: pktvisord-linux-x86_64-${{ env.VERSION }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BINARY_NAME }} + path: ${{github.workspace}}/${{ env.BINARY_NAME }} + + - name: Build and push static pktvisor-reader container + if: github.event_name != 'pull_request' + env: + BASE_BINARY: pktvisor-reader + IMAGE_NAME: ns1labs/pktvisor-reader + run: | + docker build -f docker/Dockerfile.${{ env.BASE_BINARY }}-static -t ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }} -t ${{ env.IMAGE_NAME }}:${{ env.VERSION }} . 
+ docker push -a ${{ env.IMAGE_NAME }} + echo "CONT_ID=$(docker create --name ${{ env.BASE_BINARY }}-slim-tmp ${{ env.IMAGE_NAME }}:${{ env.REF_TAG }})" >> $GITHUB_ENV + + - name: Extract static pktvisor-reader asset + if: github.event_name != 'pull_request' + env: + BASE_BINARY: pktvisor-reader + IMAGE_NAME: ns1labs/pktvisor-reader + run: | + docker cp ${{ env.CONT_ID }}:/${{ env.BASE_BINARY }} ${{github.workspace}}/${{ env.BASE_BINARY }}-linux-x86_64-${{ env.VERSION }} + + - name: Upload pktvisor-reader artifact + if: github.event_name != 'pull_request' + env: + BINARY_NAME: pktvisor-reader-linux-x86_64-${{ env.VERSION }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BINARY_NAME }} + path: ${{github.workspace}}/${{ env.BINARY_NAME }} \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/.gitmodules @@ -0,0 +1 @@ + diff --git a/3rd/netflow/netflow.h b/3rd/netflow/netflow.h index 6c1a11ad7..20a24bfac 100644 --- a/3rd/netflow/netflow.h +++ b/3rd/netflow/netflow.h @@ -268,8 +268,8 @@ struct NF10_DATA_FLOWSET_HEADER { /* A record in a NetFlow v.9 template record */ struct peer_nf9_record { - uint32_t type{0}; - uint32_t len{0}; + uint32_t type = 0; + uint32_t len = 0; peer_nf9_record(uint32_t type, uint32_t len) : type(type) @@ -289,8 +289,8 @@ struct peer_nf9_template { /* A record in a NetFlow v.10 template record */ struct peer_nf10_record { - uint32_t type{0}; - uint32_t len{0}; + uint32_t type = 0; + uint32_t len = 0; peer_nf10_record(uint32_t type, uint32_t len) : type(type) diff --git a/CMakeLists.txt b/CMakeLists.txt index a94a22638..e49da2538 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,7 +5,7 @@ cmake_minimum_required(VERSION 3.13) ####################################################### # this is the source of truth for semver version -project(visor VERSION 4.3.0) +project(visor VERSION 4.2.0) # for main line release, this is empty # for development 
release, this is "-develop" @@ -73,6 +73,9 @@ if(PKTVISOR_CONAN_INIT) message(STATUS "Using new Conan environment") set(ENV{CONAN_USER_HOME} ${CMAKE_BINARY_DIR}/conan_home) set(ENV{CONAN_REVISIONS_ENABLED} 1) + conan_add_remote(NAME ns1labs INDEX 0 + URL https://ns1labs.jfrog.io/artifactory/api/conan/ns1labs-conan + VERIFY_SSL True) endif() if(WIN32) diff --git a/README.md b/README.md index bc366cc44..6701a198d 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ ![pktvisor](docs/images/pktvisor-header.png) -[![Build status](https://github.com/orb-community/pktvisor/workflows/Build/badge.svg)](https://github.com/orb-community/pktvisor/actions) -[![CodeQL](https://github.com/orb-community/pktvisor/workflows/CodeQL/badge.svg)](https://github.com/orb-community/pktvisor/security/code-scanning) -[![CodeCov](https://codecov.io/gh/orb-community/pktvisor/branch/develop/graph/badge.svg)](https://app.codecov.io/gh/orb-community/pktvisor/tree/develop) +[![Build status](https://github.com/ns1labs/pktvisor/workflows/Build/badge.svg)](https://github.com/ns1labs/pktvisor/actions) +[![CodeQL](https://github.com/ns1labs/pktvisor/workflows/CodeQL/badge.svg)](https://github.com/ns1labs/pktvisor/security/code-scanning) +[![CodeCov](https://codecov.io/gh/ns1labs/pktvisor/branch/develop/graph/badge.svg)](https://app.codecov.io/gh/ns1labs/pktvisor/tree/develop)

@@ -23,7 +23,7 @@ network data streams and extracting actionable insights directly from the edge w It is resource efficient and built from the ground up to be modular and dynamically controlled in real time via API and YAML policies. Input and analyzer modules may be dynamically loaded at runtime. Metric output can be used and visualized both on-node via command line UI (for localized, hyper real-time actions) -as well as centrally collected into industry standard observability stacks like Prometheus and Grafana.. +as well as centrally collected into industry standard observability stacks like Prometheus and Grafana. The [input stream system](src/inputs) is designed to _tap into_ data streams. It currently supports [packet capture](https://en.wikipedia.org/wiki/Packet_analyzer), [dnstap](https://dnstap.info/), [sFlow](https://en.wikipedia.org/wiki/SFlow) and [Netflow](https://en.wikipedia.org/wiki/NetFlow)/[IPFIX](https://en.wikipedia.org/wiki/IP_Flow_Information_Export) and will soon support additional taps such as @@ -38,7 +38,7 @@ The [stream analyzer system](src/handlers) includes full application layer analy * Set Cardinality * GeoIP/ASN -Please see the list of [current metrics](https://github.com/orb-community/pktvisor/wiki/Current-Metrics) or the [sample metric output](https://github.com/orb-community/pktvisor/wiki/Sample-pktvisor-Output-Data). +Please see the list of [current metrics](https://github.com/ns1labs/pktvisor/wiki/Current-Metrics) or the [sample metric output](https://github.com/ns1labs/pktvisor/wiki/Sample-pktvisor-Output-Data). pktvisor has its origins in observability of critical internet infrastructure in support of DDoS protection, traffic engineering, and ongoing operations. @@ -55,17 +55,17 @@ the [Network](src/handlers/net) and [DNS](src/handlers/dns) stream processors, a ### Docker One of the easiest ways to get started with pktvisor is to use -the [public docker image](https://hub.docker.com/r/orbcommunity/pktvisor). 
The image contains the collector +the [public docker image](https://hub.docker.com/r/ns1labs/pktvisor). The image contains the collector agent (`pktvisord`), the command line UI (`pktvisor-cli`) and the pcap and dnstap file analyzer (`pktvisor-reader`). When running the container, you specify which tool to run. 1. *Pull the container* ``` -docker pull orbcommunity/pktvisor +docker pull ns1labs/pktvisor ``` -or use `orbcommunity/pktvisor:latest-develop` to get the latest development version. +or use `ns1labs/pktvisor:latest-develop` to get the latest development version. 2. *Start the collector agent* @@ -75,7 +75,7 @@ _Note that this step requires docker host networking_ to observe traffic outside that [currently only Linux supports host networking](https://docs.docker.com/network/host/): ``` -docker run --net=host -d orbcommunity/pktvisor pktvisord eth0 +docker run --net=host -d ns1labs/pktvisor pktvisord eth0 ``` If the container does not stay running, check the `docker logs` output. @@ -87,13 +87,13 @@ UI (`pktvisor-cli`) in the foreground, and exit when Ctrl-C is pressed. It conne the built in REST API. ``` -docker run -it --rm --net=host orbcommunity/pktvisor pktvisor-cli +docker run -it --rm --net=host ns1labs/pktvisor pktvisor-cli ``` ### Linux Static Binary (AppImage, x86_64) You may also use the Linux all-in-one binary, built with [AppImage](https://appimage.org/), which is available for -download [on the Releases page](https://github.com/orb-community/pktvisor/releases). It is designed to work on all modern +download [on the Releases page](https://github.com/ns1labs/pktvisor/releases). It is designed to work on all modern Linux distributions and does not require installation or any other dependencies. 
```shell @@ -168,7 +168,7 @@ sudo setcap cap_net_raw,cap_net_admin=eip //pktvisord-x86_64 Current command line options are described with: ``` -docker run --rm orbcommunity/pktvisor pktvisord --help +docker run --rm ns1labs/pktvisor pktvisord --help ``` or @@ -320,7 +320,7 @@ If running in a Docker container, you must mount the configuration file into the is on the host at `/local/pktvisor/agent.yaml`, you can mount it into the container and use it with this command: ```shell -docker run -v /local/pktvisor:/usr/local/pktvisor/ --net=host orbcommunity/pktvisor pktvisord --config /usr/local/pktvisor/agent.yaml --admin-api +docker run -v /local/pktvisor:/usr/local/pktvisor/ --net=host ns1labs/pktvisor pktvisord --config /usr/local/pktvisor/agent.yaml --admin-api ``` @@ -331,7 +331,7 @@ summarization, which is by default a sliding 5 minute time window. It can also c host. ``` -docker run --rm orbcommunity/pktvisor pktvisor-cli -h +docker run --rm ns1labs/pktvisor pktvisor-cli -h ``` ```shell @@ -367,7 +367,7 @@ using a tool such as [golang-dnstap](https://github.com/dnstap/golang-dnstap). Both take many of the same options, and do all of the same analysis, as `pktvisord` for live capture. pcap files may include Flow capture data. ``` -docker run --rm orbcommunity/pktvisor pktvisor-reader --help +docker run --rm ns1labs/pktvisor pktvisor-reader --help ``` ```shell @@ -404,7 +404,7 @@ You can use the docker container by passing in a volume referencing the director output will contain the JSON summarization output, which you can capture or pipe into other tools, for example: ``` -$ docker run --rm -v /pktvisor/src/tests/fixtures:/pcaps orbcommunity/pktvisor pktvisor-reader /pcaps/dns_ipv4_udp.pcap | jq . +$ docker run --rm -v /pktvisor/src/tests/fixtures:/pcaps ns1labs/pktvisor pktvisor-reader /pcaps/dns_ipv4_udp.pcap | jq . 
[2021-03-11 18:45:04.572] [pktvisor] [info] Load input plugin: PcapInputModulePlugin dev.visor.module.input/1.0 [2021-03-11 18:45:04.573] [pktvisor] [info] Load handler plugin: DnsHandler dev.visor.module.handler/1.0 @@ -524,14 +524,14 @@ You can set the `instance` label by passing `--prom-instance ID` If you are interested in centralized collection using [remote write](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage), including to -cloud providers, there is a [docker image available](https://hub.docker.com/r/orbcommunity/pktvisor-prom-write) to make this +cloud providers, there is a [docker image available](https://hub.docker.com/r/ns1labs/pktvisor-prom-write) to make this easy. See [centralized_collection/prometheus](centralized_collection/prometheus) for more. Also see [getorb.io](https://getorb.io) for information on connecting pktvisor agents to the Orb observability platform. ### REST API -REST API documentation is available in [OpenAPI Format](https://app.swaggerhub.com/apis/orb-community/pktvisor/3.0.0-oas3) +REST API documentation is available in [OpenAPI Format](https://app.swaggerhub.com/apis/ns1labs/pktvisor/3.0.0-oas3) Please note that the administration control plane API (`--admin-api`) is currently undergoing heavy iteration and so is not yet documented. If you have a use case that requires the administration API, please [contact us](#contact-us) to @@ -545,7 +545,7 @@ ingress and egress traffic: ``` docker run --rm --net=host -d \ --mount type=bind,source=/opt/geo,target=/geo \ - orbcommunity/pktvisor pktvisord \ + ns1labs/pktvisor pktvisord \ --geo-city /geo/GeoIP2-City.mmdb \ --geo-asn /geo/GeoIP2-ISP.mmdb \ -H 192.168.0.54/32,127.0.0.1/32 \ @@ -574,10 +574,10 @@ Please [contact us](#contact-us) if you have any questions on installation, use, We are very interested in hearing about your use cases, feature requests, and other feedback! 
-* [File an issue](https://github.com/orb-community/pktvisor/issues/new) -* See existing [issues](https://github.com/orb-community/pktvisor/issues) -* Start a [Discussion](https://github.com/orb-community/pktvisor/discussions) -* [Join us on Slack](https://join.slack.com/t/orb-community/shared_invite/zt-qqsm5cb4-9fsq1xa~R3h~nX6W0sJzmA) +* [File an issue](https://github.com/ns1labs/pktvisor/issues/new) +* See existing [issues](https://github.com/ns1labs/pktvisor/issues) +* Start a [Discussion](https://github.com/ns1labs/pktvisor/discussions) +* [Join us on Slack](https://join.slack.com/t/ns1labs/shared_invite/zt-qqsm5cb4-9fsq1xa~R3h~nX6W0sJzmA) * Send mail to [info@pktvisor.dev](mailto:info@pktvisor.dev) ## Build @@ -591,7 +591,7 @@ pktvisor is developed and tested on Linux and OSX. A Windows port is in progress #### Dependencies -* [Conan](https://conan.io/) 1.X C++ package manager +* [Conan](https://conan.io/) C++ package manager * CMake >= 3.13 (`cmake`) * C++ compiler supporting C++17 @@ -603,7 +603,7 @@ The general build steps are: ``` # clone the repository -git clone https://github.com/orb-community/pktvisor.git +git clone https://github.com/ns1labs/pktvisor.git cd pktvisor mkdir build && cd build @@ -618,8 +618,8 @@ bin/pktvisord --help ``` As development environments can vary widely, please see -the [Dockerfile](https://github.com/orb-community/pktvisor/blob/master/docker/Dockerfile) -and [Continuous Integration build file](https://github.com/orb-community/pktvisor/blob/master/.github/workflows/build.yml) for +the [Dockerfile](https://github.com/ns1labs/pktvisor/blob/master/docker/Dockerfile) +and [Continuous Integration build file](https://github.com/ns1labs/pktvisor/blob/master/.github/workflows/build.yml) for reference. 
## Contribute diff --git a/RFCs/2021-04-16-76-collection-policies.md b/RFCs/2021-04-16-76-collection-policies.md index ed756a674..480e4a610 100644 --- a/RFCs/2021-04-16-76-collection-policies.md +++ b/RFCs/2021-04-16-76-collection-policies.md @@ -38,8 +38,6 @@ visor: input_type: pcap filter: bpf: "port 53" - config: - tcp_packet_reassembly_cache_limit: 100000 # stream handlers to attach to this input stream # these decide exactly which data to summarize and expose for collection handlers: diff --git a/RFCs/2022-06-23-307-config.md b/RFCs/2022-06-23-307-config.md index bb0b715e6..3d6121985 100644 --- a/RFCs/2022-06-23-307-config.md +++ b/RFCs/2022-06-23-307-config.md @@ -5,7 +5,7 @@ ## Summary -Pktvisord binary allows multiple [command line arguments](https://github.com/orb-community/pktvisor#agent-usage) on its start up. Almost all the parameters can be passed to pktvisord +Pktvisord binary allows multiple [command line arguments](https://github.com/ns1labs/pktvisor#agent-usage) on its start up. Almost all the parameters can be passed to pktvisord via yaml configuration file. Config file can also be combined with command line arguments. Moreover, the config file has a feature to allow setup of global configuration for specific handlers (`global_handler_config`). diff --git a/appimage/Makefile b/appimage/Makefile index bf1c1d0f2..085a7079c 100644 --- a/appimage/Makefile +++ b/appimage/Makefile @@ -1,4 +1,4 @@ -DEV_IMAGE ?= orbcommunity/pktvisor:latest +DEV_IMAGE ?= ns1labs/pktvisor:latest # in dev mode we just use the latest image as the start point ifneq ($(strip $(DEV_MODE)),) diff --git a/appimage/README.md b/appimage/README.md index 74207345e..c6b2d808b 100644 --- a/appimage/README.md +++ b/appimage/README.md @@ -14,7 +14,7 @@ Because the build can take a while you may want to build the appimage from the l You may also specify a custom image in dev mode by setting the env file `DEV_IMAGE=` to the image you wish to build on. 
Example: -`DEV_IMAGE="orbcommunity/pktvisor:develop" DEV_MODE=t make pktvisor-x86_64.AppImage` +`DEV_IMAGE="ns1labs/pktvisor:develop" DEV_MODE=t make pktvisor-x86_64.AppImage` ## Usage: diff --git a/appimage/pktvisor/TerminalEmulatorRequired.txt b/appimage/pktvisor/TerminalEmulatorRequired.txt index 960af1977..7f0d9f302 100644 --- a/appimage/pktvisor/TerminalEmulatorRequired.txt +++ b/appimage/pktvisor/TerminalEmulatorRequired.txt @@ -1 +1 @@ -This app needs to be run from a terminal to function correctly. See the docs at https://github.com/orb-community/pktvisor for more details. +This app needs to be run from a terminal to function correctly. See the docs at https://github.com/ns1labs/pktvisor for more details. \ No newline at end of file diff --git a/automated_tests/docs/pktvisor/create_a_policy_using_admin_permission_with_all_handlers.md b/automated_tests/docs/pktvisor/create_a_policy_using_admin_permission_with_all_handlers.md index 5374c98f1..f61bad705 100644 --- a/automated_tests/docs/pktvisor/create_a_policy_using_admin_permission_with_all_handlers.md +++ b/automated_tests/docs/pktvisor/create_a_policy_using_admin_permission_with_all_handlers.md @@ -1,6 +1,6 @@ ## Scenario: Create a policy using admin permission with all handlers ## Steps: -- Provide a pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord --admin-api ` +- Provide a pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord --admin-api ` - Create a policy with all handlers through a post request on the endpoint: `/api/v1/policies` - Check our method `generate_pcap_policy_with_all_handlers` on [policies.py](../../features/steps/policies.py) in order to have examples of how to do it - Make a get request to the same endpoint diff --git a/automated_tests/docs/pktvisor/create_a_policy_using_admin_permission_with_only_one_handler.md b/automated_tests/docs/pktvisor/create_a_policy_using_admin_permission_with_only_one_handler.md index 
05c68f058..9327a0aa6 100644 --- a/automated_tests/docs/pktvisor/create_a_policy_using_admin_permission_with_only_one_handler.md +++ b/automated_tests/docs/pktvisor/create_a_policy_using_admin_permission_with_only_one_handler.md @@ -5,7 +5,7 @@ DNS: ## Steps: -- Provide a pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord --admin-api ` +- Provide a pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord --admin-api ` - Create a policy with dns handler through a post request on the endpoint: `/api/v1/policies` - Make a get request to the same endpoint @@ -19,7 +19,7 @@ DNS: NET: ## Steps: -- Provide a pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord --admin-api ` +- Provide a pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord --admin-api ` - Create a policy with net handler through a post request on the endpoint: `/api/v1/policies` - Make a get request to the same endpoint @@ -34,7 +34,7 @@ DHCP: ## Steps: -- Provide a pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord --admin-api ` +- Provide a pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord --admin-api ` - Create a policy with dhcp handler through a post request on the endpoint: `/api/v1/policies` - Make a get request to the same endpoint @@ -48,7 +48,7 @@ PCAP: ## Steps: -- Provide a pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord --admin-api ` +- Provide a pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord --admin-api ` - Create a policy with pcap handler through a post request on the endpoint: `/api/v1/policies` - Make a get request to the same endpoint diff --git a/automated_tests/docs/pktvisor/create_a_policy_without_admin_permission.md b/automated_tests/docs/pktvisor/create_a_policy_without_admin_permission.md index 358e694b5..f8c07ad83 100644 --- 
a/automated_tests/docs/pktvisor/create_a_policy_without_admin_permission.md +++ b/automated_tests/docs/pktvisor/create_a_policy_without_admin_permission.md @@ -1,7 +1,7 @@ ## Scenario: Create a policy without admin permission ## Steps: -- Provide a pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord ` +- Provide a pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord ` - Try to create a policy through a post request on the endpoint: `/api/v1/policies` - Make a get request to the same endpoint diff --git a/automated_tests/docs/pktvisor/remove_a_policy_without_admin_permission.md b/automated_tests/docs/pktvisor/remove_a_policy_without_admin_permission.md index 336363c3c..5705d6600 100644 --- a/automated_tests/docs/pktvisor/remove_a_policy_without_admin_permission.md +++ b/automated_tests/docs/pktvisor/remove_a_policy_without_admin_permission.md @@ -1,7 +1,7 @@ ## Scenario: Remove a policy without admin permission ## Steps: -- Provide a pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord ` +- Provide a pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord ` - Try to remove the default policy through a DELETE request on the endpoint: `/api/v1/policies/{name_of_the_policy}` - Make a get request to the same endpoint diff --git a/automated_tests/docs/pktvisor/remove_policies_using_admin_permission_from_pktvisor_instance.md b/automated_tests/docs/pktvisor/remove_policies_using_admin_permission_from_pktvisor_instance.md index 689af9da1..d4e591463 100644 --- a/automated_tests/docs/pktvisor/remove_policies_using_admin_permission_from_pktvisor_instance.md +++ b/automated_tests/docs/pktvisor/remove_policies_using_admin_permission_from_pktvisor_instance.md @@ -5,7 +5,7 @@ All policies ## Steps: -- Provide a pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord --admin-api ` +- Provide a pktvisor instance using `docker run --net=host -d 
ns1labs/pktvisor pktvisord --admin-api ` - Remove the default policy through a DELETE request on the endpoint: `/api/v1/policies/{name_of_the_policy}` - Make a get request to the same endpoint @@ -20,7 +20,7 @@ One policy ## Steps: -- Provide a pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord --admin-api ` +- Provide a pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord --admin-api ` - Create a policy with all handlers through a post request on the endpoint: `/api/v1/policies` - Check our method `generate_pcap_policy_with_all_handlers` on [policies.py](../../features/steps/policies.py) in order to have examples of how to do it - Remove one of the running policies using a DELETE request on the endpoint: `/api/v1/policies/{name_of_the_policy}` diff --git a/automated_tests/docs/pktvisor/run_multiple_pktvisors_instances_using_different_ports.md b/automated_tests/docs/pktvisor/run_multiple_pktvisors_instances_using_different_ports.md index dc5eafa79..2ffe5238c 100644 --- a/automated_tests/docs/pktvisor/run_multiple_pktvisors_instances_using_different_ports.md +++ b/automated_tests/docs/pktvisor/run_multiple_pktvisors_instances_using_different_ports.md @@ -1,8 +1,8 @@ ## Scenario: Run multiple pktvisors instances using different ports ## Steps: -- Provide 1 pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord ` -- Provide 1 pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord -p 10854 ` +- Provide 1 pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord ` +- Provide 1 pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord -p 10854 ` ## Expected Result: diff --git a/automated_tests/docs/pktvisor/run_multiple_pktvisors_instances_using_the_same_ports.md b/automated_tests/docs/pktvisor/run_multiple_pktvisors_instances_using_the_same_ports.md index af41cc789..b4700513c 100644 --- 
a/automated_tests/docs/pktvisor/run_multiple_pktvisors_instances_using_the_same_ports.md +++ b/automated_tests/docs/pktvisor/run_multiple_pktvisors_instances_using_the_same_ports.md @@ -1,8 +1,8 @@ ## Scenario: Run multiple pktvisors instances using the same ports ## Steps: -- Provide 1 pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord ` -- Provide 1 pktvisor instance using `docker run --net=host -d orbcommunity/pktvisor pktvisord ` +- Provide 1 pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord ` +- Provide 1 pktvisor instance using `docker run --net=host -d ns1labs/pktvisor pktvisord ` ## Expected Result: diff --git a/automated_tests/docs/pktvisor/run_pktvisor_instance_using_docker_command.md b/automated_tests/docs/pktvisor/run_pktvisor_instance_using_docker_command.md index 773747ab0..3d6f73a84 100644 --- a/automated_tests/docs/pktvisor/run_pktvisor_instance_using_docker_command.md +++ b/automated_tests/docs/pktvisor/run_pktvisor_instance_using_docker_command.md @@ -1,6 +1,6 @@ ## Scenario: Run pktvisor instance using docker command ## Steps: - - Run docker using `docker run --net=host -d orbcommunity/pktvisor pktvisord ` + - Run docker using `docker run --net=host -d ns1labs/pktvisor pktvisord ` ## Expected Result: diff --git a/automated_tests/features/steps/test_config.py b/automated_tests/features/steps/test_config.py index 83778ea46..91478cdca 100644 --- a/automated_tests/features/steps/test_config.py +++ b/automated_tests/features/steps/test_config.py @@ -30,7 +30,7 @@ def _read_configs(): configs['sudo'] = "True" assert_that(configs.get('sudo_password'), not_none(), 'Sudo password was not provided!') client = docker.from_env() - configs['pktvisor_docker_image'] = f"orbcommunity/pktvisor:{configs.get('pktvisor_docker_image_tag', 'latest')}" + configs['pktvisor_docker_image'] = f"ns1labs/pktvisor:{configs.get('pktvisor_docker_image_tag', 'latest')}" try: 
client.images.get(configs['pktvisor_docker_image']) except ImageNotFound: diff --git a/centralized_collection/elastic/docker/with_telegraf/Dockerfile b/centralized_collection/elastic/docker/with_telegraf/Dockerfile index 6dc2502c8..d7c64d9a1 100644 --- a/centralized_collection/elastic/docker/with_telegraf/Dockerfile +++ b/centralized_collection/elastic/docker/with_telegraf/Dockerfile @@ -1,7 +1,7 @@ ARG PKTVISOR_TAG=latest FROM telegraf:1.16.2 as telegraf -FROM orbcommunity/pktvisor:${PKTVISOR_TAG} +FROM ns1labs/pktvisor:${PKTVISOR_TAG} COPY --from=telegraf /usr/bin/telegraf /usr/local/bin/telegraf diff --git a/centralized_collection/prometheus/README.md b/centralized_collection/prometheus/README.md index 09e53c049..7a969b069 100644 --- a/centralized_collection/prometheus/README.md +++ b/centralized_collection/prometheus/README.md @@ -1,7 +1,7 @@ # Centralized Prometheus Collection This directory contains resources for building a docker container aiding centralized prometheus collection. It is -published to Docker hub at https://hub.docker.com/r/orbcommunity/pktvisor-prom-write +published to Docker hub at https://hub.docker.com/r/ns1labs/pktvisor-prom-write It combines pktvisord with the [Grafana Agent](https://github.com/grafana/agent) for collecting and sending metrics to Prometheus through @@ -16,19 +16,19 @@ Grafana installation (ID 14221). 
Example: ```shell -docker pull orbcommunity/pktvisor-prom-write +docker pull ns1labs/pktvisor-prom-write docker run -d --net=host --env PKTVISORD_ARGS="--prom-instance " \ --env REMOTE_URL="https:///api/prom/push" --env USERNAME="" \ ---env PASSWORD="" orbcommunity/pktvisor-prom-write +--env PASSWORD="" ns1labs/pktvisor-prom-write ``` Example with Geo enabled (assuming files are located in `/usr/local/geo`): ```shell -docker pull orbcommunity/pktvisor-prom-write +docker pull ns1labs/pktvisor-prom-write docker run -d --mount type=bind,source=/usr/local/geo,target=/geo --net=host --env \ PKTVISORD_ARGS="--prom-instance --geo-city /geo/GeoIP2-City.mmdb --geo-asn /geo/GeoIP2-ISP.mmdb " \ ---env REMOTE_URL="https:///api/prom/push" --env USERNAME="" --env PASSWORD="" orbcommunity/pktvisor-prom-write +--env REMOTE_URL="https:///api/prom/push" --env USERNAME="" --env PASSWORD="" ns1labs/pktvisor-prom-write ``` **PKTVISORD_ARGS optionally allows a semicolon delimited list of arguments to handle arguments such as bpf filters that may contain whitespace* @@ -36,10 +36,10 @@ PKTVISORD_ARGS="--prom-instance --geo-city /geo/GeoIP2-City.mmdb --ge Example with Geo enabled and bpf filter (assuming files are located in `/usr/local/geo`): ```shell -docker pull orbcommunity/pktvisor-prom-write +docker pull ns1labs/pktvisor-prom-write docker run -d --mount type=bind,source=/usr/local/geo,target=/geo --net=host --env \ PKTVISORD_ARGS="-b; port 53; --prom-instance; ; --geo-city; /geo/GeoIP2-City.mmdb; --geo-asn; /geo/GeoIP2-ISP.mmdb; " \ ---env REMOTE_URL="https:///api/prom/push" --env USERNAME="" --env PASSWORD="" orbcommunity/pktvisor-prom-write +--env REMOTE_URL="https:///api/prom/push" --env USERNAME="" --env PASSWORD="" ns1labs/pktvisor-prom-write ``` There are a several pieces of information you need to substitute above: diff --git a/centralized_collection/prometheus/docker-grafana-agent/Dockerfile b/centralized_collection/prometheus/docker-grafana-agent/Dockerfile index 
59435f097..9dae75965 100644 --- a/centralized_collection/prometheus/docker-grafana-agent/Dockerfile +++ b/centralized_collection/prometheus/docker-grafana-agent/Dockerfile @@ -1,7 +1,7 @@ ARG PKTVISOR_TAG=latest FROM grafana/agent:latest as agent -FROM orbcommunity/pktvisor:${PKTVISOR_TAG} +FROM ns1labs/pktvisor:${PKTVISOR_TAG} COPY --from=agent /bin/agent /usr/local/bin/agent diff --git a/cmake/opentelemetry-proto.cmake b/cmake/opentelemetry-proto.cmake deleted file mode 100644 index 0edeab933..000000000 --- a/cmake/opentelemetry-proto.cmake +++ /dev/null @@ -1,285 +0,0 @@ -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -# -# The dependency on opentelemetry-proto can be provided different ways. -# By order of decreasing priority, options are: -# -# 1 - Use a provided package -# -# This is useful to build opentelemetry-cpp as part of a super project. -# -# The super project provides the path to the opentelemetry-proto -# source code using variable ${OTELCPP_PROTO_PATH} -# -# 2 - Search for a opentelemetry-proto git submodule -# -# When git submodule is used, -# the opentelemetry-proto code is located in: -# third_party/opentelemetry-proto -# -# 3 - Download opentelemetry-proto from github -# -# Code from the required version is used, -# unless a specific release tag is provided -# in variable ${opentelemetry-proto} -# - -if(OTELCPP_PROTO_PATH) - if(NOT EXISTS "${OTELCPP_PROTO_PATH}/opentelemetry/proto/common/v1/common.proto") - message(FATAL_ERROR "OTELCPP_PROTO_PATH does not point to a opentelemetry-proto repository") - endif() - message(STATUS "opentelemetry-proto dependency satisfied by: external path") - set(PROTO_PATH ${OTELCPP_PROTO_PATH}) - set(needs_proto_download FALSE) -else() - if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/third_party/opentelemetry-proto/.git) - message(STATUS "opentelemetry-proto dependency satisfied by: git submodule") - set(PROTO_PATH "${CMAKE_CURRENT_SOURCE_DIR}/third_party/opentelemetry-proto") - 
set(needs_proto_download FALSE) - else() - message(STATUS "opentelemetry-proto dependency satisfied by: github download") - if("${opentelemetry-proto}" STREQUAL "") - set(opentelemetry-proto "v0.19.0") - endif() - include(ExternalProject) - ExternalProject_Add( - opentelemetry-proto - GIT_REPOSITORY https://github.com/open-telemetry/opentelemetry-proto.git - GIT_TAG "${opentelemetry-proto}" - UPDATE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" - CONFIGURE_COMMAND "" - TEST_AFTER_INSTALL 0 - DOWNLOAD_NO_PROGRESS 1 - LOG_CONFIGURE 1 - LOG_BUILD 1 - LOG_INSTALL 1) - ExternalProject_Get_Property(opentelemetry-proto INSTALL_DIR) - set(PROTO_PATH "${INSTALL_DIR}/src/opentelemetry-proto") - set(needs_proto_download TRUE) - endif() -endif() - -include(${PROJECT_SOURCE_DIR}/cmake/proto-options-patch.cmake) - -set(COMMON_PROTO "${PROTO_PATH}/opentelemetry/proto/common/v1/common.proto") -set(RESOURCE_PROTO - "${PROTO_PATH}/opentelemetry/proto/resource/v1/resource.proto") -set(TRACE_PROTO "${PROTO_PATH}/opentelemetry/proto/trace/v1/trace.proto") -set(LOGS_PROTO "${PROTO_PATH}/opentelemetry/proto/logs/v1/logs.proto") -set(METRICS_PROTO "${PROTO_PATH}/opentelemetry/proto/metrics/v1/metrics.proto") - -set(TRACE_SERVICE_PROTO - "${PROTO_PATH}/opentelemetry/proto/collector/trace/v1/trace_service.proto") -set(LOGS_SERVICE_PROTO - "${PROTO_PATH}/opentelemetry/proto/collector/logs/v1/logs_service.proto") -set(METRICS_SERVICE_PROTO - "${PROTO_PATH}/opentelemetry/proto/collector/metrics/v1/metrics_service.proto" -) - -set(GENERATED_PROTOBUF_PATH - "${CMAKE_BINARY_DIR}/generated/third_party/opentelemetry-proto") - -file(MAKE_DIRECTORY "${GENERATED_PROTOBUF_PATH}") - -set(COMMON_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/common/v1/common.pb.cc") -set(COMMON_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/common/v1/common.pb.h") -set(RESOURCE_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/resource/v1/resource.pb.cc") 
-set(RESOURCE_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/resource/v1/resource.pb.h") -set(TRACE_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/trace/v1/trace.pb.cc") -set(TRACE_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/trace/v1/trace.pb.h") -set(LOGS_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/logs/v1/logs.pb.cc") -set(LOGS_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/logs/v1/logs.pb.h") -set(METRICS_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/metrics/v1/metrics.pb.cc") -set(METRICS_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/metrics/v1/metrics.pb.h") - -set(TRACE_SERVICE_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/trace/v1/trace_service.pb.cc" -) -set(TRACE_SERVICE_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/trace/v1/trace_service.pb.h" -) -if(WITH_OTLP_GRPC) - set(TRACE_SERVICE_GRPC_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/trace/v1/trace_service.grpc.pb.cc" - ) - set(TRACE_SERVICE_GRPC_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/trace/v1/trace_service.grpc.pb.h" - ) -endif() -set(LOGS_SERVICE_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/logs/v1/logs_service.pb.cc" -) -set(LOGS_SERVICE_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/logs/v1/logs_service.pb.h" -) -if(WITH_OTLP_GRPC) - set(LOGS_SERVICE_GRPC_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/logs/v1/logs_service.grpc.pb.cc" - ) - set(LOGS_SERVICE_GRPC_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/logs/v1/logs_service.grpc.pb.h" - ) -endif() -set(METRICS_SERVICE_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/metrics/v1/metrics_service.pb.cc" -) -set(METRICS_SERVICE_PB_H_FILE - 
"${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" -) -if(WITH_OTLP_GRPC) - set(METRICS_SERVICE_GRPC_PB_CPP_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.cc" - ) - set(METRICS_SERVICE_GRPC_PB_H_FILE - "${GENERATED_PROTOBUF_PATH}/opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.h" - ) -endif() - -foreach(IMPORT_DIR ${PROTOBUF_IMPORT_DIRS}) - list(APPEND PROTOBUF_INCLUDE_FLAGS "-I${IMPORT_DIR}") -endforeach() - -if(WITH_OTLP_GRPC) - if(CMAKE_CROSSCOMPILING) - find_program(gRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin) - else() - if(TARGET gRPC::grpc_cpp_plugin) - project_build_tools_get_imported_location(gRPC_CPP_PLUGIN_EXECUTABLE - gRPC::grpc_cpp_plugin) - else() - find_program(gRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin) - endif() - endif() - message(STATUS "gRPC_CPP_PLUGIN_EXECUTABLE=${gRPC_CPP_PLUGIN_EXECUTABLE}") -endif() - -if(WITH_OTLP_GRPC) - add_custom_command( - OUTPUT ${COMMON_PB_H_FILE} - ${COMMON_PB_CPP_FILE} - ${RESOURCE_PB_H_FILE} - ${RESOURCE_PB_CPP_FILE} - ${TRACE_PB_H_FILE} - ${TRACE_PB_CPP_FILE} - ${LOGS_PB_H_FILE} - ${LOGS_PB_CPP_FILE} - ${METRICS_PB_H_FILE} - ${METRICS_PB_CPP_FILE} - ${TRACE_SERVICE_PB_H_FILE} - ${TRACE_SERVICE_PB_CPP_FILE} - ${TRACE_SERVICE_GRPC_PB_H_FILE} - ${TRACE_SERVICE_GRPC_PB_CPP_FILE} - ${LOGS_SERVICE_PB_H_FILE} - ${LOGS_SERVICE_PB_CPP_FILE} - ${LOGS_SERVICE_GRPC_PB_H_FILE} - ${LOGS_SERVICE_GRPC_PB_CPP_FILE} - ${METRICS_SERVICE_PB_H_FILE} - ${METRICS_SERVICE_PB_CPP_FILE} - ${METRICS_SERVICE_GRPC_PB_H_FILE} - ${METRICS_SERVICE_GRPC_PB_CPP_FILE} - COMMAND - ${PROTOBUF_PROTOC_EXECUTABLE} ARGS "--experimental_allow_proto3_optional" - "--proto_path=${PROTO_PATH}" ${PROTOBUF_INCLUDE_FLAGS} - "--cpp_out=${GENERATED_PROTOBUF_PATH}" - "--grpc_out=generate_mock_code=true:${GENERATED_PROTOBUF_PATH}" - --plugin=protoc-gen-grpc="${gRPC_CPP_PLUGIN_EXECUTABLE}" ${COMMON_PROTO} - ${RESOURCE_PROTO} ${TRACE_PROTO} ${LOGS_PROTO} 
${METRICS_PROTO} - ${TRACE_SERVICE_PROTO} ${LOGS_SERVICE_PROTO} ${METRICS_SERVICE_PROTO}) -else() - add_custom_command( - OUTPUT ${COMMON_PB_H_FILE} - ${COMMON_PB_CPP_FILE} - ${RESOURCE_PB_H_FILE} - ${RESOURCE_PB_CPP_FILE} - ${TRACE_PB_H_FILE} - ${TRACE_PB_CPP_FILE} - ${LOGS_PB_H_FILE} - ${LOGS_PB_CPP_FILE} - ${METRICS_PB_H_FILE} - ${METRICS_PB_CPP_FILE} - ${TRACE_SERVICE_PB_H_FILE} - ${TRACE_SERVICE_PB_CPP_FILE} - ${LOGS_SERVICE_PB_H_FILE} - ${LOGS_SERVICE_PB_CPP_FILE} - ${METRICS_SERVICE_PB_H_FILE} - ${METRICS_SERVICE_PB_CPP_FILE} - COMMAND - ${PROTOBUF_PROTOC_EXECUTABLE} ARGS "--experimental_allow_proto3_optional" - "--proto_path=${PROTO_PATH}" ${PROTOBUF_INCLUDE_FLAGS} - "--cpp_out=${GENERATED_PROTOBUF_PATH}" ${COMMON_PROTO} ${RESOURCE_PROTO} - ${TRACE_PROTO} ${LOGS_PROTO} ${METRICS_PROTO} ${TRACE_SERVICE_PROTO} - ${LOGS_SERVICE_PROTO} ${METRICS_SERVICE_PROTO}) -endif() - -include_directories("${GENERATED_PROTOBUF_PATH}") - -if(WITH_OTLP_GRPC) - add_library( - opentelemetry_proto STATIC - ${COMMON_PB_CPP_FILE} - ${RESOURCE_PB_CPP_FILE} - ${TRACE_PB_CPP_FILE} - ${LOGS_PB_CPP_FILE} - ${METRICS_PB_CPP_FILE} - ${TRACE_SERVICE_PB_CPP_FILE} - ${TRACE_SERVICE_GRPC_PB_CPP_FILE} - ${LOGS_SERVICE_PB_CPP_FILE} - ${LOGS_SERVICE_GRPC_PB_CPP_FILE} - ${METRICS_SERVICE_PB_CPP_FILE} - ${METRICS_SERVICE_GRPC_PB_CPP_FILE}) -else() - add_library( - opentelemetry_proto STATIC - ${COMMON_PB_CPP_FILE} - ${RESOURCE_PB_CPP_FILE} - ${TRACE_PB_CPP_FILE} - ${LOGS_PB_CPP_FILE} - ${METRICS_PB_CPP_FILE} - ${TRACE_SERVICE_PB_CPP_FILE} - ${LOGS_SERVICE_PB_CPP_FILE} - ${METRICS_SERVICE_PB_CPP_FILE}) -endif() - -if(needs_proto_download) - add_dependencies(opentelemetry_proto opentelemetry-proto) -endif() -set_target_properties(opentelemetry_proto PROPERTIES EXPORT_NAME proto) -patch_protobuf_targets(opentelemetry_proto) - -install( - TARGETS opentelemetry_proto - EXPORT "${PROJECT_NAME}-target" - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE 
DESTINATION ${CMAKE_INSTALL_LIBDIR}) - -install( - DIRECTORY ${GENERATED_PROTOBUF_PATH}/opentelemetry - DESTINATION include - FILES_MATCHING - PATTERN "*.h") - -if(TARGET protobuf::libprotobuf) - target_link_libraries(opentelemetry_proto PUBLIC ${CONAN_LIBS_PROTOBUF}) -else() # cmake 3.8 or lower - target_include_directories(opentelemetry_proto - PUBLIC ${Protobuf_INCLUDE_DIRS}) - target_link_libraries(opentelemetry_proto INTERFACE ${Protobuf_LIBRARIES}) -endif() - -if(BUILD_SHARED_LIBS) - set_property(TARGET opentelemetry_proto PROPERTY POSITION_INDEPENDENT_CODE ON) -endif() diff --git a/cmake/proto-options-patch.cmake b/cmake/proto-options-patch.cmake deleted file mode 100644 index ed294ea79..000000000 --- a/cmake/proto-options-patch.cmake +++ /dev/null @@ -1,116 +0,0 @@ -macro(check_append_cxx_compiler_flag OUTPUT_VAR) - foreach(CHECK_FLAG ${ARGN}) - check_cxx_compiler_flag(${CHECK_FLAG} - "check_cxx_compiler_flag_${CHECK_FLAG}") - if(check_cxx_compiler_flag_${CHECK_FLAG}) - list(APPEND ${OUTPUT_VAR} ${CHECK_FLAG}) - endif() - endforeach() -endmacro() - -if(NOT PATCH_PROTOBUF_SOURCES_OPTIONS_SET) - if(MSVC) - unset(PATCH_PROTOBUF_SOURCES_OPTIONS CACHE) - set(PATCH_PROTOBUF_SOURCES_OPTIONS /wd4244 /wd4251 /wd4267 /wd4309) - - if(MSVC_VERSION GREATER_EQUAL 1922) - # see - # https://docs.microsoft.com/en-us/cpp/overview/cpp-conformance-improvements?view=vs-2019#improvements_162 - # for detail - list(APPEND PATCH_PROTOBUF_SOURCES_OPTIONS /wd5054) - endif() - - if(MSVC_VERSION GREATER_EQUAL 1925) - list(APPEND PATCH_PROTOBUF_SOURCES_OPTIONS /wd4996) - endif() - - if(MSVC_VERSION LESS 1910) - list(APPEND PATCH_PROTOBUF_SOURCES_OPTIONS /wd4800) - endif() - else() - unset(PATCH_PROTOBUF_SOURCES_OPTIONS CACHE) - include(CheckCXXCompilerFlag) - check_append_cxx_compiler_flag( - PATCH_PROTOBUF_SOURCES_OPTIONS -Wno-type-limits - -Wno-deprecated-declarations -Wno-unused-parameter) - endif() - set(PATCH_PROTOBUF_SOURCES_OPTIONS_SET TRUE) - if(PATCH_PROTOBUF_SOURCES_OPTIONS) - 
set(PATCH_PROTOBUF_SOURCES_OPTIONS - ${PATCH_PROTOBUF_SOURCES_OPTIONS} - CACHE INTERNAL - "Options to disable warning of generated protobuf sources" FORCE) - endif() -endif() - -function(patch_protobuf_sources) - if(PATCH_PROTOBUF_SOURCES_OPTIONS) - foreach(PROTO_SRC ${ARGN}) - unset(PROTO_SRC_OPTIONS) - get_source_file_property(PROTO_SRC_OPTIONS ${PROTO_SRC} COMPILE_OPTIONS) - if(PROTO_SRC_OPTIONS) - list(APPEND PROTO_SRC_OPTIONS ${PATCH_PROTOBUF_SOURCES_OPTIONS}) - else() - set(PROTO_SRC_OPTIONS ${PATCH_PROTOBUF_SOURCES_OPTIONS}) - endif() - - set_source_files_properties( - ${PROTO_SRC} PROPERTIES COMPILE_OPTIONS "${PROTO_SRC_OPTIONS}") - endforeach() - unset(PROTO_SRC) - unset(PROTO_SRC_OPTIONS) - endif() -endfunction() - -function(patch_protobuf_targets) - if(PATCH_PROTOBUF_SOURCES_OPTIONS) - foreach(PROTO_TARGET ${ARGN}) - unset(PROTO_TARGET_OPTIONS) - get_target_property(PROTO_TARGET_OPTIONS ${PROTO_TARGET} COMPILE_OPTIONS) - if(PROTO_TARGET_OPTIONS) - list(APPEND PROTO_TARGET_OPTIONS ${PATCH_PROTOBUF_SOURCES_OPTIONS}) - else() - set(PROTO_TARGET_OPTIONS ${PATCH_PROTOBUF_SOURCES_OPTIONS}) - endif() - - set_target_properties( - ${PROTO_TARGET} PROPERTIES COMPILE_OPTIONS "${PROTO_TARGET_OPTIONS}") - endforeach() - unset(PROTO_TARGET) - unset(PROTO_TARGET_OPTIONS) - endif() -endfunction() - -function(project_build_tools_get_imported_location OUTPUT_VAR_NAME TARGET_NAME) - if(CMAKE_BUILD_TYPE) - string(TOUPPER "IMPORTED_LOCATION_${CMAKE_BUILD_TYPE}" - TRY_SPECIFY_IMPORTED_LOCATION) - get_target_property(${OUTPUT_VAR_NAME} ${TARGET_NAME} - ${TRY_SPECIFY_IMPORTED_LOCATION}) - endif() - if(NOT ${OUTPUT_VAR_NAME}) - get_target_property(${OUTPUT_VAR_NAME} ${TARGET_NAME} IMPORTED_LOCATION) - endif() - if(NOT ${OUTPUT_VAR_NAME}) - get_target_property( - project_build_tools_get_imported_location_IMPORTED_CONFIGURATIONS - ${TARGET_NAME} IMPORTED_CONFIGURATIONS) - foreach( - project_build_tools_get_imported_location_IMPORTED_CONFIGURATION IN - LISTS 
project_build_tools_get_imported_location_IMPORTED_CONFIGURATIONS) - get_target_property( - ${OUTPUT_VAR_NAME} - ${TARGET_NAME} - "IMPORTED_LOCATION_${project_build_tools_get_imported_location_IMPORTED_CONFIGURATION}" - ) - if(${OUTPUT_VAR_NAME}) - break() - endif() - endforeach() - endif() - if(${OUTPUT_VAR_NAME}) - set(${OUTPUT_VAR_NAME} - ${${OUTPUT_VAR_NAME}} - PARENT_SCOPE) - endif() -endfunction() diff --git a/cmd/pktvisor-reader/main.cpp b/cmd/pktvisor-reader/main.cpp index f5e08f16d..fcf1d4713 100644 --- a/cmd/pktvisor-reader/main.cpp +++ b/cmd/pktvisor-reader/main.cpp @@ -20,10 +20,10 @@ #include "GeoDB.h" #include "handlers/dhcp/DhcpStreamHandler.h" #include "handlers/dns/v1/DnsStreamHandler.h" -#include "handlers/net/v1/NetStreamHandler.h" -#include "inputs/dnstap/DnstapInputStream.h" -#include "inputs/flow/FlowInputStream.h" +#include "handlers/net/NetStreamHandler.h" #include "inputs/pcap/PcapInputStream.h" +#include "inputs/flow/FlowInputStream.h" +#include "inputs/dnstap/DnstapInputStream.h" static const char USAGE[] = R"(pktvisor-reader diff --git a/cmd/pktvisord/main.cpp b/cmd/pktvisord/main.cpp index f01563b3b..ffc243d9b 100644 --- a/cmd/pktvisord/main.cpp +++ b/cmd/pktvisord/main.cpp @@ -100,15 +100,6 @@ static const char USAGE[] = Prometheus Options: --prometheus Ignored, Prometheus output always enabled (left for backwards compatibility) --prom-instance ID Optionally set the 'instance' label to given ID - OpenTelemetry Options - --otel Enable OpenTelemetry OTLP exporter over HTTP(S) - --otel-host HOST Set OTEL destination IP where the data will be pushed to (default: localhost) - --otel-path PATH Set OTEL destination URL path (default: /v1/metrics) - --otel-port PORT Set OTEL destination port number (default: 4318) - --otel-interval N The interval in seconds that exporter will periodically push data (default: 60) - --otel-tls Enable TLS when connecting to OTEL destination - --otel-tls-cert FILE Use given TLS cert. 
Required if --otel-tls is enabled. - --otel-tls-key FILE Use given TLS private key. Required if --otel-tls is enabled. Metric Enrichment Options: --iana-service-port-registry FILE IANA Service Name and Transport Protocol Port Number Registry file in CSV format --default-service-registry FILE Default IANA Service Name Port Number Registry CSV file to be loaded if no other is specified @@ -158,18 +149,6 @@ struct CmdOptions { }; WebServer web_server; - struct Opentelemetry { - bool otel_support{false}; - bool tls_support{false}; - std::optional interval; - std::optional port; - std::optional host; - std::optional path; - std::optional tls_cert; - std::optional tls_key; - }; - Opentelemetry otel_setup; - struct Crashpad { bool disable{false}; std::optional token; @@ -203,10 +182,8 @@ void fill_cmd_options(std::map args, CmdOptions &opt logger->error("invalid schema in config file: {}", args["--config"].asString()); exit(EXIT_FAILURE); } - if (!config_file["version"]) { - logger->info("missing version in config file, using version \"1.0\""); - } else if (!config_file["version"].IsScalar() || config_file["version"].as() != "1.0") { - logger->error("unsupported version in config file: {}", args["--config"].asString()); + if (!config_file["version"] || !config_file["version"].IsScalar() || config_file["version"].as() != "1.0") { + logger->error("missing or unsupported version in config file: {}", args["--config"].asString()); exit(EXIT_FAILURE); } @@ -320,53 +297,6 @@ void fill_cmd_options(std::map args, CmdOptions &opt options.web_server.tls_key = config["tls_key"].as(); } - options.otel_setup.otel_support = (config["otel"] && config["otel"].as()) || args["--otel"].asBool(); - options.otel_setup.tls_support = (config["otel_tls"] && config["otel_tls"].as()) || args["--otel-tls"].asBool(); - - if (args["--otel-host"]) { - options.otel_setup.host = args["--otel-host"].asString(); - } else if (config["otel_host"]) { - options.otel_setup.host = config["otel_host"].as(); - } 
else { - options.otel_setup.host = "localhost"; - } - - if (args["--otel-path"]) { - options.otel_setup.path = args["--otel-path"].asString(); - } else if (config["otel_host"]) { - options.otel_setup.path = config["otel_path"].as(); - } else { - options.otel_setup.path = "/v1/metrics"; - } - - if (args["--otel-port"]) { - options.otel_setup.port = static_cast(args["--otel-port"].asLong()); - } else if (config["otel_port"]) { - options.otel_setup.port = config["otel_port"].as(); - } else { - options.otel_setup.port = 4318; - } - - if (args["--otel-interval"]) { - options.otel_setup.interval = static_cast(args["--otel-interval"].asLong()); - } else if (config["otel_interval"]) { - options.otel_setup.interval = config["otel_interval"].as(); - } else { - options.otel_setup.interval = 60; - } - - if (args["--otel-tls-cert"]) { - options.otel_setup.tls_cert = args["--otel-tls-cert"].asString(); - } else if (config["otel_tls_cert"]) { - options.otel_setup.tls_cert = config["otel_tls_cert"].as(); - } - - if (args["--otel-tls-key"]) { - options.otel_setup.tls_key = args["--otel-tls-key"].asString(); - } else if (config["otel_tls_key"]) { - options.otel_setup.tls_key = config["otel_tls_key"].as(); - } - options.module.list = (config["module_list"] && config["module_list"].as()) || args["--module-list"].asBool(); if (args["--module-dir"]) { @@ -616,26 +546,9 @@ int main(int argc, char *argv[]) logger->info("Enabling TLS with cert {} and key {}", http_config.key, http_config.cert); } - OtelConfig otel_config; - if (options.otel_setup.otel_support) { - otel_config.enable = true; - if (options.otel_setup.tls_support) { - if (!options.otel_setup.tls_key.has_value() || !options.otel_setup.tls_cert.has_value()) { - logger->error("you must specify --otel-tls-key and --otel-tls-cert to use --otel-tls"); - exit(EXIT_FAILURE); - } - otel_config.tls_key = options.otel_setup.tls_key.value(); - otel_config.tls_cert = options.otel_setup.tls_cert.value(); - logger->info("Enabling OTEL TLS 
with cert {} and key {}", otel_config.tls_key, otel_config.tls_cert); - } - otel_config.path = options.otel_setup.path.value(); - otel_config.endpoint = options.otel_setup.host.value(); - otel_config.port_number = options.otel_setup.port.value(); - } - std::unique_ptr svr; try { - svr = std::make_unique(®istry, logger, http_config, otel_config, prom_config); + svr = std::make_unique(®istry, logger, http_config, prom_config); } catch (const std::exception &e) { logger->error(e.what()); logger->info("exit with failure"); diff --git a/conanfile.txt b/conanfile.txt index 71d786c6f..cb6a9a643 100644 --- a/conanfile.txt +++ b/conanfile.txt @@ -6,17 +6,15 @@ cpp-httplib/0.11.2 docopt.cpp/0.6.3 fmt/8.1.1 fast-cpp-csv-parser/cci.20211104 -json-schema-validator/2.2.0 +json-schema-validator/2.1.0 libmaxminddb/1.7.1 -nlohmann_json/3.11.2 +nlohmann_json/3.10.5 openssl/1.1.1k -opentelemetry-proto/0.19.0 pcapplusplus/22.05 protobuf/3.19.2 sigslot/1.2.1 spdlog/1.11.0 uvw/2.12.1 -libpcap/1.10.1 yaml-cpp/0.7.0 robin-hood-hashing/3.11.5 zlib/1.2.11 diff --git a/docker/Dockerfile b/docker/Dockerfile index 44b82b7ad..360d2a410 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -6,7 +6,7 @@ RUN \ apt-get update && \ apt-get upgrade --yes --force-yes && \ apt-get install --yes --force-yes --no-install-recommends ${BUILD_DEPS} && \ - pip3 install "conan==1.59.0" + pip3 install conan # need git for current hash for VERSION COPY ./.git/ /pktvisor-src/.git/ @@ -50,11 +50,8 @@ RUN \ #create dir and download geo db RUN mkdir /geo-db -RUN cd /geo-db && wget https://github.com/orb-community/geo-asn-database/raw/main/asn.mmdb.gz -RUN cd /geo-db && wget https://github.com/orb-community/geo-asn-database/raw/main/city.mmdb.gz - -RUN mkdir /iana -COPY --from=cppbuild /pktvisor-src/src/tests/fixtures/pktvisor-port-service-names.csv /iana/custom-iana.csv +RUN cd /geo-db && wget https://github.com/ns1labs/geo-asn-database/raw/main/asn.mmdb.gz +RUN cd /geo-db && wget 
https://github.com/ns1labs/geo-asn-database/raw/main/city.mmdb.gz COPY --from=cppbuild /tmp/build/bin/pktvisord /usr/local/sbin/pktvisord COPY --from=cppbuild /tmp/build/bin/crashpad_handler /usr/local/sbin/crashpad_handler diff --git a/docker/Dockerfile.clang-toolchain b/docker/Dockerfile.clang-toolchain new file mode 100644 index 000000000..61b65fe53 --- /dev/null +++ b/docker/Dockerfile.clang-toolchain @@ -0,0 +1,16 @@ +FROM ns1labs/clang-toolchain:latest +ARG REQUIRE="make cmake python3 py3-pip perl git bash libexecinfo-static libexecinfo-dev" +ARG UID=1000 + +RUN adduser -u ${UID} -D builder + +RUN apk add --no-cache ${REQUIRE} + +RUN ln -s /usr/local/bin/clang /usr/local/bin/cc \ + && ln -s /usr/local/bin/clang /usr/local/bin/gcc \ + && ln -s /usr/local/bin/clang++ /usr/local/bin/c++ \ + && ln -s /usr/local/bin/clang++ /usr/local/bin/g++ \ + && ln -s /usr/local/bin/clang-cpp /usr/local/bin/cpp \ + && pip install conan + +USER builder diff --git a/docker/Dockerfile.crashhandler b/docker/Dockerfile.crashhandler index 4295f2088..d33f892f0 100644 --- a/docker/Dockerfile.crashhandler +++ b/docker/Dockerfile.crashhandler @@ -18,8 +18,8 @@ COPY ./docker/run.sh /run.sh #create dir and download geo db RUN mkdir /geo-db -RUN cd /geo-db && wget https://github.com/orb-community/geo-asn-database/raw/main/asn.mmdb.gz -RUN cd /geo-db && wget https://github.com/orb-community/geo-asn-database/raw/main/city.mmdb.gz +RUN cd /geo-db && wget https://github.com/ns1labs/geo-asn-database/raw/main/asn.mmdb.gz +RUN cd /geo-db && wget https://github.com/ns1labs/geo-asn-database/raw/main/city.mmdb.gz #create dir and setup iana csv RUN mkdir /iana diff --git a/docker/Dockerfile.debug b/docker/Dockerfile.debug new file mode 100644 index 000000000..27ba07c27 --- /dev/null +++ b/docker/Dockerfile.debug @@ -0,0 +1,56 @@ +FROM debian:bullseye-slim AS cppbuild + +ENV BUILD_DEPS "g++ cmake make git pkgconf jq python3-pip python3-setuptools ca-certificates libasan6" + +RUN \ + apt-get update && 
\ + apt-get upgrade --yes --force-yes && \ + apt-get install --yes --force-yes --no-install-recommends ${BUILD_DEPS} && \ + pip3 install conan + +# need git for current hash for VERSION +COPY ./.git/ /pktvisor-src/.git/ +COPY ./src/ /pktvisor-src/src/ +COPY ./cmd/ /pktvisor-src/cmd/ +COPY ./3rd/ /pktvisor-src/3rd/ +COPY ./docker/ /pktvisor-src/docker/ +COPY ./golang/ /pktvisor-src/golang/ +COPY ./integration_tests/ /pktvisor-src/integration_tests/ +COPY ./cmake/ /pktvisor-src/cmake/ +COPY ./CMakeLists.txt /pktvisor-src/ +COPY ./conanfile.txt /pktvisor-src/ + +WORKDIR /tmp/build +RUN \ + conan profile new --detect default && \ + conan profile update settings.compiler.libcxx=libstdc++11 default && \ + conan config set general.revisions_enabled=1 + +RUN \ + PKG_CONFIG_PATH=/local/lib/pkgconfig cmake -DCMAKE_BUILD_TYPE=Debug -DASAN=ON /pktvisor-src && \ + make all -j 4 + +FROM golang:latest AS gobuild +COPY golang/ /src/ +WORKDIR /src/ +COPY --from=cppbuild /pktvisor-src/golang/pkg/client/version.go /src/pkg/client/version.go +RUN go build -o pktvisor-cli cmd/pktvisor-cli/main.go + +FROM debian:bullseye-slim AS runtime + +ENV RUNTIME_DEPS "curl ca-certificates libasan6 gdb" + +RUN \ + apt-get update && \ + apt-get upgrade --yes --force-yes && \ + apt-get install --yes --force-yes --no-install-recommends ${RUNTIME_DEPS} && \ + rm -rf /var/lib/apt + +COPY --from=cppbuild /tmp/build/bin/pktvisord /usr/local/sbin/pktvisord +COPY --from=cppbuild /tmp/build/bin/pktvisor-reader /usr/local/sbin/pktvisor-reader +COPY --from=gobuild /src/pktvisor-cli /usr/local/bin/pktvisor-cli +COPY docker/entry.sh /entry.sh +RUN chmod a+x /entry.sh + +ENTRYPOINT [ "/entry.sh" ] + diff --git a/docker/Dockerfile.pktvisor-cli b/docker/Dockerfile.pktvisor-cli new file mode 100644 index 000000000..d612ada13 --- /dev/null +++ b/docker/Dockerfile.pktvisor-cli @@ -0,0 +1,32 @@ +FROM debian:bullseye-slim AS cppbuild + +ENV BUILD_DEPS "g++ cmake make git pkgconf jq python3-pip python3-setuptools 
ca-certificates" + +RUN \ + apt-get update && \ + apt-get upgrade --yes --force-yes && \ + apt-get install --yes --force-yes --no-install-recommends ${BUILD_DEPS} && \ + pip3 install conan + +# need git for current hash for VERSION +COPY ./.git/ /pktvisor-src/.git/ +COPY ./golang/ /pktvisor-src/golang/ +COPY ./CMakeLists.txt /pktvisor-src/ + +WORKDIR /tmp/build + +# ONLY run cmake configure to get version need for go build (no c++ build) +RUN VERSION_ONLY=1 cmake /pktvisor-src + +FROM golang:latest AS gobuild +COPY golang/ /src/ +WORKDIR /src/ +COPY --from=cppbuild /pktvisor-src/golang/pkg/client/version.go /src/pkg/client/version.go +RUN CGO_ENABLED=0 go build -o pktvisor-cli cmd/pktvisor-cli/main.go + +FROM alpine:latest AS runtime + +COPY --from=gobuild /src/pktvisor-cli /pktvisor-cli +COPY docker/entry-cli.sh / + +ENTRYPOINT [ "/entry-cli.sh" ] diff --git a/docker/Dockerfile.pktvisor-reader-static b/docker/Dockerfile.pktvisor-reader-static new file mode 100644 index 000000000..b1d4ae0b2 --- /dev/null +++ b/docker/Dockerfile.pktvisor-reader-static @@ -0,0 +1,7 @@ +FROM ns1labs/static-base AS cppbuild + +FROM scratch AS runtime + +COPY --from=cppbuild /tmp/build/bin/pktvisor-reader /pktvisor-reader + +ENTRYPOINT [ "/pktvisor-reader" ] diff --git a/docker/Dockerfile.pktvisord-static b/docker/Dockerfile.pktvisord-static new file mode 100644 index 000000000..72637d926 --- /dev/null +++ b/docker/Dockerfile.pktvisord-static @@ -0,0 +1,7 @@ +FROM ns1labs/static-base AS cppbuild + +FROM scratch AS runtime + +COPY --from=cppbuild /tmp/build/bin/pktvisord /pktvisord + +ENTRYPOINT [ "/pktvisord" ] diff --git a/docker/Dockerfile.static-base b/docker/Dockerfile.static-base new file mode 100644 index 000000000..32cda78cb --- /dev/null +++ b/docker/Dockerfile.static-base @@ -0,0 +1,31 @@ +FROM ns1labs/clang-toolchain:latest AS cppbuild +ARG REQUIRE="make cmake python3 py3-pip perl git bash libexecinfo-static libexecinfo-dev" + +RUN apk add --no-cache ${REQUIRE} + +RUN ln -s 
/usr/local/bin/clang /usr/local/bin/cc \ + && ln -s /usr/local/bin/clang /usr/local/bin/gcc \ + && ln -s /usr/local/bin/clang++ /usr/local/bin/c++ \ + && ln -s /usr/local/bin/clang++ /usr/local/bin/g++ \ + && ln -s /usr/local/bin/clang-cpp /usr/local/bin/cpp \ + && pip install conan + +# need git for current hash for VERSION +COPY ./.git/ /pktvisor-src/.git/ +COPY ./src/ /pktvisor-src/src/ +COPY ./cmd/ /pktvisor-src/cmd/ +COPY ./3rd/ /pktvisor-src/3rd/ +COPY ./golang/ /pktvisor-src/golang/ +COPY ./integration_tests/ /pktvisor-src/integration_tests/ +COPY ./cmake/ /pktvisor-src/cmake/ +COPY ./CMakeLists.txt /pktvisor-src/ +COPY ./conanfile.txt /pktvisor-src/ + +WORKDIR /tmp/build + +RUN cmake -DCMAKE_BUILD_TYPE=Release /pktvisor-src \ + && make all test -j 4 + +# after build, use this to upload conan packages to ns1labs jfrog server +# docker run --rm -it ns1labs/static-base +# CONAN_USER_HOME=/tmp/build/conan_home conan upload "*" --all -r ns1labs -c \ No newline at end of file diff --git a/docker/Makefile b/docker/Makefile index bfabf258a..71e4b254f 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -1,7 +1,7 @@ -.PHONY: pktvisor +.PHONY: clang-toolchain # for developer environments (e.g. clion docker toolchain) # note does not require/include the source base -pktvisor: - docker build -t orbcommunity/pktvisor -f ./Dockerfile .. +clang-toolchain: + docker build --build-arg UID=$(shell id -u) -t pktvisor/clang-toolchain -f Dockerfile.clang-toolchain . diff --git a/docker/entry-cli.sh b/docker/entry-cli.sh new file mode 100755 index 000000000..14b6cd666 --- /dev/null +++ b/docker/entry-cli.sh @@ -0,0 +1,10 @@ +#!/bin/sh +# this is the entry point to the docker container, and is only used there +set -e +export PATH=$PATH:/usr/local/bin/:/usr/local/sbin/ + +# Add sleep to allow tty to be ready for Docker when using -it +echo "starting pktvisor-cli..." 
+sleep 1 + +exec /pktvisor-cli diff --git a/docker/entry-cp.sh b/docker/entry-cp.sh index 8c54b33d9..2e5761179 100755 --- a/docker/entry-cp.sh +++ b/docker/entry-cp.sh @@ -14,9 +14,9 @@ trap trapeze SIGINT if [ $# -eq 0 ]; then echo "No arguments provided: specify either 'pktvisor-cli', 'pktvisor-reader' or 'pktvisord'. Try:" - echo "docker run orbcommunity/pktvisor pktvisor-cli -h" - echo "docker run orbcommunity/pktvisor pktvisor-reader --help" - echo "docker run orbcommunity/pktvisor pktvisord --help" + echo "docker run ns1labs/pktvisor pktvisor-cli -h" + echo "docker run ns1labs/pktvisor pktvisor-reader --help" + echo "docker run ns1labs/pktvisor pktvisord --help" exit 1 fi diff --git a/docker/entry.sh b/docker/entry.sh new file mode 100755 index 000000000..b65428857 --- /dev/null +++ b/docker/entry.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# this is the entry point to the docker container, and is only used there + +set -e + +export PATH=$PATH:/usr/local/bin/:/usr/local/sbin/ + +if [ $# -eq 0 ]; then + echo "No arguments provided: specify either 'pktvisor-cli', 'pktvisor-reader' or 'pktvisord'. Try:" + echo "docker run ns1labs/pktvisor pktvisor-cli -h" + echo "docker run ns1labs/pktvisor pktvisor-reader --help" + echo "docker run ns1labs/pktvisor pktvisord --help" + exit 1 +fi + +# backwards compatibility +BINARY="$1" +if [ "$BINARY" = 'pktvisor' ]; then + BINARY='pktvisor-cli' +fi + +if [ "$BINARY" = 'pktvisor-pcap' ]; then + BINARY='pktvisor-reader' +fi + +# Add sleep to allow tty to be ready for Docker when using -it +if [ "$BINARY" = 'pktvisor-cli' ]; then + sleep 1 +fi + +shift +exec "$BINARY" "$@" diff --git a/docs/index.html b/docs/index.html index c42347e08..f67ba00b9 100644 --- a/docs/index.html +++ b/docs/index.html @@ -180,8 +180,8 @@

pktvisor

pktvisor summarizes data streams in real time and provides a clean, time-windowed HTTP interface and command line UI to the results
-
More information: https://orb.community/
-
Contact Info: info@netboxlabs.com
+
More information: https://helloreverb.com
+
Contact Info: ns1labs@ns1.com
Version: 3.0.0-oas3
Apache 2.0
diff --git a/docs/pktvisor-3.0.0-oas3-swagger.yaml b/docs/pktvisor-3.0.0-oas3-swagger.yaml index 8b7d2c9d5..fa15b4d37 100644 --- a/docs/pktvisor-3.0.0-oas3-swagger.yaml +++ b/docs/pktvisor-3.0.0-oas3-swagger.yaml @@ -4,7 +4,7 @@ info: version: 3.0.0-oas3 title: pktvisor contact: - email: info@netboxlabs + email: ns1labs@ns1.com license: name: Apache 2.0 url: 'http://www.apache.org/licenses/LICENSE-2.0.html' @@ -205,4 +205,4 @@ components: in: type: object out: - type: object + type: object \ No newline at end of file diff --git a/libs/CMakeLists.txt b/libs/CMakeLists.txt index 046236340..be2a285fe 100644 --- a/libs/CMakeLists.txt +++ b/libs/CMakeLists.txt @@ -1,6 +1,5 @@ message(STATUS "Custom Libraries") add_subdirectory(visor_transaction) -add_subdirectory(visor_tcp) add_subdirectory(visor_dns) add_subdirectory(visor_utils) \ No newline at end of file diff --git a/libs/visor_dns/CMakeLists.txt b/libs/visor_dns/CMakeLists.txt index 06191035e..8aa95b004 100644 --- a/libs/visor_dns/CMakeLists.txt +++ b/libs/visor_dns/CMakeLists.txt @@ -15,7 +15,6 @@ target_include_directories(VisorLibDns target_link_libraries(VisorLibDns PUBLIC - Visor::Lib::Tcp ${CONAN_LIBS_PCAPPLUSPLUS} ${CONAN_LIBS_PTHREADS4W} ${CONAN_LIBS_LIBPCAP} diff --git a/libs/visor_dns/DnsLayer.h b/libs/visor_dns/DnsLayer.h index ba4567eb8..34fd2a0e8 100644 --- a/libs/visor_dns/DnsLayer.h +++ b/libs/visor_dns/DnsLayer.h @@ -11,7 +11,6 @@ #include "DnsResource.h" #include "DnsResourceData.h" #include -#include #ifdef __GNUC__ #pragma GCC diagnostic pop #endif @@ -46,7 +45,7 @@ struct dnshdr { opcode:4, /** Query/Response flag */ queryOrResponse:1, - /** Response code */ + /** Return Code */ responseCode:4, /** Checking disabled flag */ checkingDisabled:1, @@ -76,7 +75,7 @@ struct dnshdr { authenticData:1, /** Checking disabled flag */ checkingDisabled:1, - /** Response code */ + /** Return Code */ responseCode:4; #endif /** Number of DNS query records in packet */ @@ -114,11 +113,6 @@ struct dnshdr { 
DnsLayer(pcpp::UdpLayer *udpLayer, pcpp::Packet *packet) : DnsLayer(udpLayer->getData()+sizeof(pcpp::udphdr), udpLayer->getDataLen()-sizeof(pcpp::udphdr), udpLayer, packet) { - } - - DnsLayer(pcpp::TcpLayer *tcpLayer, pcpp::Packet *packet) - : DnsLayer(tcpLayer->getData()+sizeof(pcpp::tcphdr), tcpLayer->getDataLen()-sizeof(pcpp::tcphdr), tcpLayer, packet) - { } /** diff --git a/libs/visor_dns/PublicSuffixList.h b/libs/visor_dns/PublicSuffixList.h index 42e150fab..33c4d977a 100644 --- a/libs/visor_dns/PublicSuffixList.h +++ b/libs/visor_dns/PublicSuffixList.h @@ -18,7 +18,7 @@ namespace visor::lib::dns { using namespace std::literals; // ===BEGIN ICANN DOMAINS=== -static const robin_hood::unordered_node_map> ICANN_DOMAINS = { +static const robin_hood::unordered_map> ICANN_DOMAINS = { {"ac"sv, {"com.ac"sv, "edu.ac"sv, "gov.ac"sv, "net.ac"sv, "mil.ac"sv, "org.ac"sv}}, {"ad"sv, {"nom.ad"sv}}, {"ae"sv, {"co.ae"sv, "net.ae"sv, "org.ae"sv, "sch.ae"sv, "ac.ae"sv, "gov.ae"sv, "mil.ae"sv}}, diff --git a/libs/visor_dns/dns.h b/libs/visor_dns/dns.h index e457c5fa6..0afd89a6b 100644 --- a/libs/visor_dns/dns.h +++ b/libs/visor_dns/dns.h @@ -28,7 +28,6 @@ enum RCode { }; static std::unordered_map QTypeNames({ - {0, "Reserved (0)"}, {1, "A"}, {2, "NS"}, {3, "MD"}, @@ -62,7 +61,6 @@ static std::unordered_map QTypeNames({ {31, "EID"}, {32, "NIMLOC"}, {33, "SRV"}, - {34, "ATMA"}, {35, "NAPTR"}, {36, "KX"}, {37, "CERT"}, @@ -110,19 +108,14 @@ static std::unordered_map QTypeNames({ {252, "AXFR"}, {253, "MAILB"}, {254, "MAILA"}, - {255, "*"}, {256, "URI"}, {257, "CAA"}, {258, "AVC"}, {259, "DOA"}, {260, "AMTRELAY"}, - {32768, "TA"}, - {32769, "DLV"}, - {65535, "Reserved (65535)"}, }); static std::unordered_map QTypeNumbers({ - {"Reserved (0)", 0}, {"A", 1}, {"NS", 2}, {"MD", 3}, @@ -157,7 +150,6 @@ static std::unordered_map QTypeNumbers({ {"NIMLOC", 32}, {"SRV", 33}, {"NAPTR", 35}, - {"ATMA", 34}, {"KX", 36}, {"CERT", 37}, {"A6", 38}, @@ -203,16 +195,12 @@ static std::unordered_map 
QTypeNumbers({ {"IXFR", 251}, {"AXFR", 252}, {"MAILB", 253}, - {"MAILA", 254}, - {"*", 255}, + {"MAILA", 254}, + {"URI", 256}, {"CAA", 257}, {"AVC", 258}, {"DOA", 259}, {"AMTRELAY", 260}, - {"TA", 32768}, - {"DLV", 32769}, - {"Reserved (65535)", 65535}, }); static std::unordered_map RCodeNames({ @@ -226,6 +214,7 @@ static std::unordered_map RCodeNames({ {7, "YXRRSET"}, {8, "NXRRSET"}, {9, "NOTAUTH"}, + {9, "NOTAUTH"}, {10, "NOTZONE"}, {11, "DSOTYPENI"}, {16, "BADVERS"}, @@ -239,28 +228,4 @@ static std::unordered_map RCodeNames({ {23, "BADCOOKIE"}, }); -static std::unordered_map RCodeNumbers({ - {"NOERROR", 0}, - {"FORMERR", 1}, - {"SRVFAIL", 2}, - {"NXDOMAIN", 3}, - {"NOTIMP", 4}, - {"REFUSED", 5}, - {"YXDOMAIN", 6}, - {"YXRRSET", 7}, - {"NXRRSET", 8}, - {"NOTAUTH", 9}, - {"NOTZONE", 10}, - {"DSOTYPENI", 11}, - {"BADVERS", 16}, - {"BADSIG", 16}, - {"BADKEY", 17}, - {"BADTIME", 18}, - {"BADMODE", 19}, - {"BADNAME", 20}, - {"BADALG", 21}, - {"BADTRUNC", 22}, - {"BADCOOKIE", 23}, -}); - } \ No newline at end of file diff --git a/libs/visor_tcp/CMakeLists.txt b/libs/visor_tcp/CMakeLists.txt deleted file mode 100644 index fd00d9d11..000000000 --- a/libs/visor_tcp/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -message(STATUS "Visor Lib TCP Helper") - -add_library(VisorLibTcp - VisorTcpLayer.cpp - ) -add_library(Visor::Lib::Tcp ALIAS VisorLibTcp) - -target_include_directories(VisorLibTcp - PUBLIC - $ - ) - -target_link_libraries(VisorLibTcp - PUBLIC - ${CONAN_LIBS_PCAPPLUSPLUS} - ${CONAN_LIBS_PTHREADS4W} - ${CONAN_LIBS_LIBPCAP} - ${CONAN_LIBS_NPCAP} - ) diff --git a/libs/visor_tcp/VisorTcpLayer.cpp b/libs/visor_tcp/VisorTcpLayer.cpp deleted file mode 100644 index 6e302416b..000000000 --- a/libs/visor_tcp/VisorTcpLayer.cpp +++ /dev/null @@ -1,6 +0,0 @@ -#include "VisorTcpLayer.h" - -namespace visor { - - -} diff --git a/libs/visor_tcp/VisorTcpLayer.h b/libs/visor_tcp/VisorTcpLayer.h deleted file mode 100644 index 414357fb8..000000000 --- a/libs/visor_tcp/VisorTcpLayer.h +++ 
/dev/null @@ -1,24 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public -* License, v. 2.0. If a copy of the MPL was not distributed with this -* file, You can obtain one at https://mozilla.org/MPL/2.0/. */ - -#pragma once - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpedantic" -#endif -// TCPOPT_CC, TCPOPT_CCNEW and TCPOPT_CCECHO are defined in the MacOS's tcp.h. -#ifdef TCPOPT_CC -#undef TCPOPT_CC -#endif // TCPOPT_CC -#ifdef TCPOPT_CCNEW -#undef TCPOPT_CCNEW -#endif // TCPOPT_CCNEW -#ifdef TCPOPT_CCECHO -#undef TCPOPT_CCECHO -#endif // TCPOPT_CCECHO -#include -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif diff --git a/libs/visor_transaction/TransactionManager.h b/libs/visor_transaction/TransactionManager.h index 4d9ed424f..26a29ce2b 100644 --- a/libs/visor_transaction/TransactionManager.h +++ b/libs/visor_transaction/TransactionManager.h @@ -51,21 +51,15 @@ template class TransactionManager { static_assert(std::is_base_of::value, "TransactionType must inherit from Transaction structure"); - typedef robin_hood::unordered_node_map XactMap; + typedef robin_hood::unordered_map XactMap; - uint32_t _ttl_secs{0}; - uint32_t _ttl_ms{0}; + unsigned int _ttl_secs; XactMap _transactions; public: - TransactionManager(uint32_t ttl_ms = 5000) + TransactionManager(unsigned int ttl_secs = 5) + : _ttl_secs(ttl_secs) { - if (ttl_ms > 1000) { - _ttl_secs = ttl_ms / 1000; - _ttl_ms = ttl_ms - _ttl_secs * 1000; - } else { - _ttl_ms = ttl_ms; - } } void start_transaction(XactID id, TransactionType type) @@ -79,9 +73,7 @@ class TransactionManager auto result = _transactions[id]; timespec_diff(&endTS, &result.startTS, &result.totalTS); _transactions.erase(id); - if (result.totalTS.tv_sec > _ttl_secs) { - return std::pair(Result::TimedOut, result); - } else if (result.totalTS.tv_sec == _ttl_secs && (result.totalTS.tv_nsec / 1.0e6) >= _ttl_ms) { + if (result.totalTS.tv_sec >= _ttl_secs) { return 
std::pair(Result::TimedOut, result); } else { return std::pair(Result::Valid, result); @@ -105,11 +97,6 @@ class TransactionManager return timed_out.size(); } - void clear() - { - _transactions.clear(); - } - typename XactMap::size_type open_transaction_count() const { return _transactions.size(); diff --git a/libs/visor_utils/EndianPortable.h b/libs/visor_utils/EndianPortable.h deleted file mode 100644 index 2f0f19d29..000000000 --- a/libs/visor_utils/EndianPortable.h +++ /dev/null @@ -1,210 +0,0 @@ -/* - * librdkafka - Apache Kafka C library - * - * Copyright (c) 2012-2015 Magnus Edenhill - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ -#ifndef _ENDIANPORTABLE_H_ -#define _ENDIANPORTABLE_H_ - -/** - * Provides portable endian-swapping macros/functions. - * - * be64toh() - * htobe64() - * be32toh() - * htobe32() - * be16toh() - * htobe16() - * le64toh() - */ - -#ifdef __FreeBSD__ - #include -#elif defined __GLIBC__ - #include - #ifndef be64toh - /* Support older glibc (<2.9) which lack be64toh */ - #include - #if __BYTE_ORDER == __BIG_ENDIAN - #define be16toh(x) (x) - #define be32toh(x) (x) - #define be64toh(x) (x) - #define le64toh(x) __bswap_64 (x) - #define le32toh(x) __bswap_32 (x) - #else - #define be16toh(x) __bswap_16 (x) - #define be32toh(x) __bswap_32 (x) - #define be64toh(x) __bswap_64 (x) - #define le64toh(x) (x) - #define le32toh(x) (x) - #endif - #endif - -#elif defined __CYGWIN__ - #include -#elif defined(WIN32) || defined(WINx64) -#include - #if(BYTE_ORDER == LITTLE_ENDIAN) - #define htobe16(x) htons(x) - #define htole16(x) (x) - #define be16toh(x) ntohs(x) - #define le16toh(x) (x) - - #define htobe32(x) htonl(x) - #define htole32(x) (x) - #define be32toh(x) ntohl(x) - #define le32toh(x) (x) - - #define htobe64(x) htonll(x) - #define htole64(x) (x) - #define be64toh(x) ntohll(x) - #define le64toh(x) (x) - #else - #define htobe16(x) (x) - #define htole16(x) __builtin_bswap16(x) - #define be16toh(x) (x) - #define le16toh(x) __builtin_bswap16(x) - - #define htobe32(x) (x) - #define htole32(x) __builtin_bswap32(x) - #define be32toh(x) (x) - #define le32toh(x) __builtin_bswap32(x) - - #define htobe64(x) (x) - #define htole64(x) __builtin_bswap64(x) - #define be64toh(x) (x) - #define le64toh(x) __builtin_bswap64(x) - #endif -#elif defined __BSD__ - #include -#elif defined sun - #include - #include -#define __LITTLE_ENDIAN 1234 -#define __BIG_ENDIAN 4321 -#ifdef _BIG_ENDIAN -#define __BYTE_ORDER __BIG_ENDIAN -#define be64toh(x) (x) -#define be32toh(x) (x) -#define be16toh(x) (x) -#define le16toh(x) ((uint16_t)BSWAP_16(x)) -#define le32toh(x) BSWAP_32(x) -#define le64toh(x) BSWAP_64(x) 
-# else -#define __BYTE_ORDER __LITTLE_ENDIAN -#define be64toh(x) BSWAP_64(x) -#define be32toh(x) ntohl(x) -#define be16toh(x) ntohs(x) -#define le16toh(x) (x) -#define le32toh(x) (x) -#define le64toh(x) (x) -#define htole16(x) (x) -#define htole64(x) (x) -#endif /* sun */ - -#elif defined __APPLE__ - #include - #include -#if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN -#define be64toh(x) (x) -#define be32toh(x) (x) -#define be16toh(x) (x) -#define le16toh(x) OSSwapInt16(x) -#define le32toh(x) OSSwapInt32(x) -#define le64toh(x) OSSwapInt64(x) -#else -#define be64toh(x) OSSwapInt64(x) -#define be32toh(x) OSSwapInt32(x) -#define be16toh(x) OSSwapInt16(x) -#define le16toh(x) (x) -#define le32toh(x) (x) -#define le64toh(x) (x) -#endif - -#elif defined(_MSC_VER) -#include - -#define be64toh(x) _byteswap_uint64(x) -#define be32toh(x) _byteswap_ulong(x) -#define be16toh(x) _byteswap_ushort(x) -#define le16toh(x) (x) -#define le32toh(x) (x) -#define le64toh(x) (x) - -#elif defined _AIX /* AIX is always big endian */ -#define be64toh(x) (x) -#define be32toh(x) (x) -#define be16toh(x) (x) -#define le32toh(x) \ - ((((x) & 0xff) << 24) | \ - (((x) & 0xff00) << 8) | \ - (((x) & 0xff0000) >> 8) | \ - (((x) & 0xff000000) >> 24)) -#define le64toh(x) \ - ((((x) & 0x00000000000000ffL) << 56) | \ - (((x) & 0x000000000000ff00L) << 40) | \ - (((x) & 0x0000000000ff0000L) << 24) | \ - (((x) & 0x00000000ff000000L) << 8) | \ - (((x) & 0x000000ff00000000L) >> 8) | \ - (((x) & 0x0000ff0000000000L) >> 24) | \ - (((x) & 0x00ff000000000000L) >> 40) | \ - (((x) & 0xff00000000000000L) >> 56)) -#else - #include -#endif - - - -/* - * On Solaris, be64toh is a function, not a macro, so there's no need to error - * if it's not defined. 
- */ -#if !defined(__sun) && !defined(be64toh) -#error Missing definition for be64toh -#endif - -#ifndef be32toh -#define be32toh(x) ntohl(x) -#endif - -#ifndef be16toh -#define be16toh(x) ntohs(x) -#endif - -#ifndef htobe64 -#define htobe64(x) be64toh(x) -#endif -#ifndef htobe32 -#define htobe32(x) be32toh(x) -#endif -#ifndef htobe16 -#define htobe16(x) be16toh(x) -#endif - -#ifndef htole32 -#define htole32(x) le32toh(x) -#endif - -#endif /* _ENDIANPORTABLE_H_ */ diff --git a/libs/visor_utils/utils.cpp b/libs/visor_utils/utils.cpp index 7702801ea..4d16ea96b 100644 --- a/libs/visor_utils/utils.cpp +++ b/libs/visor_utils/utils.cpp @@ -1,5 +1,4 @@ #include "utils.h" -#include "EndianPortable.h" #include #include #include @@ -17,13 +16,7 @@ static void split(const std::string &s, char delim, Out result) } } -static uint8_t reverse_bits(uint8_t n) -{ - static constexpr std::array bit_reverse_masks{0, 128, 192, 224, 240, 248, 252, 254, 255}; - return bit_reverse_masks[n]; -} - -std::optional match_subnet(IPv4subnetList &ipv4_list, uint32_t ipv4_val) +std::pair match_subnet(IPv4subnetList &ipv4_list, uint32_t ipv4_val) { if (ipv4_val && !ipv4_list.empty()) { in_addr ipv4{}; @@ -31,17 +24,18 @@ std::optional match_subnet(IPv4subnetList &ipv4_ for (IPv4subnetList::const_iterator it = ipv4_list.begin(); it != ipv4_list.end(); ++it) { uint8_t cidr = it->cidr; if (cidr == 0) { - return it; + return {true, it}; } - if (!get_subnet((ipv4.s_addr ^ it->addr.s_addr), cidr)) { - return it; + uint32_t mask = htonl((0xFFFFFFFFu) << (32 - cidr)); + if (!((ipv4.s_addr ^ it->addr.s_addr) & mask)) { + return {true, it}; } } } - return std::nullopt; + return {false, IPv4subnetList::const_iterator()}; } -std::optional match_subnet(IPv6subnetList &ipv6_list, const uint8_t *ipv6_val) +std::pair match_subnet(IPv6subnetList &ipv6_list, const uint8_t *ipv6_val) { if (ipv6_val && !ipv6_list.empty()) { in6_addr ipv6{}; @@ -61,11 +55,11 @@ std::optional match_subnet(IPv6subnetList &ipv6_ result = 
subSubnetByte == subThisByte; } if (result) { - return it; + return {true, it}; } } } - return std::nullopt; + return {false, IPv6subnetList::const_iterator()}; } bool match_subnet(IPv4subnetList &ipv4_list, IPv6subnetList &ipv6_list, const std::string &ip_val) @@ -73,58 +67,13 @@ bool match_subnet(IPv4subnetList &ipv4_list, IPv6subnetList &ipv6_list, const st pcpp::IPv4Address ipv4; pcpp::IPv6Address ipv6; if (ipv4 = pcpp::IPv4Address(ip_val); ipv4.isValid()) { - return match_subnet(ipv4_list, ipv4.toInt()).has_value(); + return match_subnet(ipv4_list, ipv4.toInt()).first; } else if (ipv6 = pcpp::IPv6Address(ip_val); ipv6.isValid()) { - return match_subnet(ipv6_list, ipv6.toBytes()).has_value(); + return match_subnet(ipv6_list, ipv6.toBytes()).first; } return false; } -uint8_t get_cidr(uint32_t mask) -{ - uint8_t cidr{0}; - while (mask > 0) { - mask >>= 1; - cidr++; - } - return cidr; -} - -uint8_t get_cidr(uint8_t *addr, size_t size) -{ - uint8_t cidr{0}; - for (size_t i = 0; i < size; i++) { - while (addr[i]) { - addr[i] >>= 1; - cidr++; - } - } - return cidr; -} - -uint32_t get_subnet(uint32_t addr, uint8_t cidr) -{ - return addr & (htobe32((0xFFFFFFFFu) << (32 - cidr))); -} - -std::array get_subnet(const uint8_t *addr, uint8_t cidr) -{ - std::array mask{}; - std::memcpy(mask.data(), addr, mask.size()); - uint8_t byte_count = cidr / 8; - uint8_t bit_count = cidr % 8; - for (uint8_t b = mask.size(); b-- > 0;) { - if (b > byte_count) { - mask[b] = 0; - } else if (b == byte_count && bit_count) { - mask[b] &= reverse_bits(bit_count); - } else if (b == byte_count) { - mask[b] = 0; - } - } - return mask; -} - void parse_host_specs(const std::vector &host_list, IPv4subnetList &ipv4_list, IPv6subnetList &ipv6_list) { for (const auto &host : host_list) { diff --git a/libs/visor_utils/utils.h b/libs/visor_utils/utils.h index 1488bc1d5..32ef304d0 100644 --- a/libs/visor_utils/utils.h +++ b/libs/visor_utils/utils.h @@ -1,6 +1,6 @@ /* This Source Code Form is subject to the 
terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ +* License, v. 2.0. If a copy of the MPL was not distributed with this +* file, You can obtain one at https://mozilla.org/MPL/2.0/. */ #pragma once @@ -19,8 +19,6 @@ #ifdef __GNUC__ #pragma GCC diagnostic pop #endif -#include -#include #include #include #include @@ -60,11 +58,7 @@ bool ipv6_to_sockaddr(const pcpp::IPv6Address &ip, struct sockaddr_in6 *sa); std::vector split_str_to_vec_str(const std::string &spec, const char &delimiter); void parse_host_specs(const std::vector &host_list, IPv4subnetList &ipv4_list, IPv6subnetList &ipv6_list); -std::optional match_subnet(IPv4subnetList &ipv4_list, uint32_t ipv4_val); -std::optional match_subnet(IPv6subnetList &ipv6_list, const uint8_t *ipv6_val); +std::pair match_subnet(IPv4subnetList &ipv4_list, uint32_t ipv4_val); +std::pair match_subnet(IPv6subnetList &ipv6_list, const uint8_t *ipv6_val); bool match_subnet(IPv4subnetList &ipv4_list, IPv6subnetList &ipv6_list, const std::string &ip_val); -uint8_t get_cidr(uint32_t mask); -uint8_t get_cidr(uint8_t *addr, size_t size); -uint32_t get_subnet(uint32_t addr, uint8_t cidr); -std::array get_subnet(const uint8_t *addr, uint8_t cidr); } \ No newline at end of file diff --git a/src/AbstractMetricsManager.h b/src/AbstractMetricsManager.h index 29940be27..81ba7167e 100644 --- a/src/AbstractMetricsManager.h +++ b/src/AbstractMetricsManager.h @@ -218,7 +218,6 @@ class AbstractMetricsBucket virtual void to_json(json &j) const = 0; virtual void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const = 0; - virtual void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const = 0; virtual void update_topn_metrics(size_t topn_count, uint64_t percentile_threshold) = 0; }; @@ -530,53 +529,6 @@ class AbstractMetricsManager 
_metric_buckets.at(period)->to_prometheus(out, add_labels); } - void window_single_opentelemetry(metrics::v1::ScopeMetrics &scope, uint64_t period = 0, Metric::LabelMap add_labels = {}) const - { - std::shared_lock rl(_base_mutex); - std::shared_lock rbl(_bucket_mutex); - - if (period >= _num_periods) { - std::stringstream err; - err << "invalid metrics period, specify [0, " << _num_periods - 1 << "]"; - throw PeriodException(err.str()); - } - if (period >= _metric_buckets.size()) { - std::stringstream err; - err << "requested metrics period has not yet accumulated, current range is [0, " << _metric_buckets.size() - 1 << "]"; - throw PeriodException(err.str()); - } - - if (_groups && _groups->none()) { - return; - } - - if (!_tap_name.empty() && add_labels.find("tap") == add_labels.end()) { - add_labels["tap"] = _tap_name; - } - auto bucket = _metric_buckets.at(period).get(); - auto start_ts = bucket->start_tstamp(); - auto end_ts = bucket->end_tstamp(); - if (!end_ts.tv_sec) { - timespec_get(&end_ts, TIME_UTC); - } - bucket->to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - void window_external_opentelemetry(metrics::v1::ScopeMetrics &scope, AbstractMetricsBucket *bucket, Metric::LabelMap add_labels = {}) const - { - if (_groups && _groups->none()) { - return; - } - // static because caller guarantees only our own bucket type - auto sbucket = static_cast(bucket); - auto start_ts = sbucket->start_tstamp(); - auto end_ts = sbucket->end_tstamp(); - if (!end_ts.tv_sec) { - timespec_get(&end_ts, TIME_UTC); - } - sbucket->to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - void window_external_prometheus(std::stringstream &out, AbstractMetricsBucket *bucket, Metric::LabelMap add_labels = {}) const { if (_groups && _groups->none()) { @@ -638,6 +590,8 @@ class AbstractMetricsManager merged.merge(*m); } + std::string period_str = std::to_string(period) + "m"; + j[key]["period"]["start_ts"] = merged.start_tstamp().tv_sec; j[key]["period"]["length"] = 
merged.period_length(); diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6ce187312..0e2a5f5aa 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -5,10 +5,6 @@ find_package(Corrade REQUIRED PluginManager) set_directory_properties(PROPERTIES CORRADE_USE_PEDANTIC_FLAGS ON) -find_package(Protobuf REQUIRED) -set(OTELCPP_PROTO_PATH ${CONAN_OPENTELEMETRY-PROTO_ROOT}/res) -include(opentelemetry-proto) - add_library(visor-core AbstractModule.cpp AbstractPlugin.cpp @@ -29,7 +25,6 @@ target_include_directories(visor-core ${CMAKE_CURRENT_BINARY_DIR} # Visor::Core config.h INTERFACE $ - $ # for generated otel proto headers ) target_link_libraries(visor-core @@ -37,7 +32,6 @@ target_link_libraries(visor-core datasketches rng timer - opentelemetry_proto ${CONAN_LIBS_LIBMAXMINDDB} ${CONAN_LIBS_CORRADE} ${CONAN_LIBS_SPDLOG} diff --git a/src/CoreRegistry.cpp b/src/CoreRegistry.cpp index 408f8c1dd..774536d8c 100644 --- a/src/CoreRegistry.cpp +++ b/src/CoreRegistry.cpp @@ -122,11 +122,8 @@ void CoreRegistry::configure_from_yaml(YAML::Node &node) if (!node.IsMap() || !node["visor"]) { throw ConfigException("invalid schema"); } - - if (!node["version"]) { - _logger->info("missing version, using version \"1.0\""); - } else if (!node["version"].IsScalar() || node["version"].as() != "1.0") { - throw ConfigException("unsupported version"); + if (!node["version"] || !node["version"].IsScalar() || node["version"].as() != "1.0") { + throw ConfigException("missing or unsupported version"); } // taps diff --git a/src/CoreServer.cpp b/src/CoreServer.cpp index bcdf252f3..ded7271eb 100644 --- a/src/CoreServer.cpp +++ b/src/CoreServer.cpp @@ -16,7 +16,7 @@ namespace visor { -visor::CoreServer::CoreServer(CoreRegistry *r, std::shared_ptr logger, const HttpConfig &http_config, const OtelConfig &otel_config, const PrometheusConfig &prom_config) +visor::CoreServer::CoreServer(CoreRegistry *r, std::shared_ptr logger, const HttpConfig &http_config, const PrometheusConfig &prom_config) : 
_svr(http_config) , _registry(r) , _logger(logger) @@ -30,10 +30,6 @@ visor::CoreServer::CoreServer(CoreRegistry *r, std::shared_ptr l _registry->start(&_svr); - if (otel_config.enable) { - _otel = std::make_unique(otel_config); - } - _setup_routes(prom_config); if (!prom_config.instance_label.empty()) { @@ -442,20 +438,5 @@ void CoreServer::_setup_routes(const PrometheusConfig &prom_config) res.set_content(output.str(), "text/plain"); } }); - if (_otel) { - _otel->OnInterval([&](metrics::v1::ResourceMetrics &resource) { - for (const auto &p_mname : _registry->policy_manager()->module_get_keys()) { - try { - auto [policy, lock] = _registry->policy_manager()->module_get_locked(p_mname); - auto scope = resource.add_scope_metrics(); - scope->mutable_scope()->set_name(p_mname); - policy->opentelemetry_metrics(*scope); - } catch (const std::exception &) { - return false; - } - } - return true; - }); - } } } diff --git a/src/CoreServer.h b/src/CoreServer.h index 999520916..3f925a882 100644 --- a/src/CoreServer.h +++ b/src/CoreServer.h @@ -6,7 +6,6 @@ #include "CoreRegistry.h" #include "HttpServer.h" -#include "OpenTelemetry.h" #include #include @@ -26,11 +25,10 @@ class CoreServer std::shared_ptr _logger; std::chrono::system_clock::time_point _start_time; - std::unique_ptr _otel; void _setup_routes(const PrometheusConfig &prom_config); public: - CoreServer(CoreRegistry *registry, std::shared_ptr logger, const HttpConfig &http_config, const OtelConfig &otel_config, const PrometheusConfig &prom_config); + CoreServer(CoreRegistry *registry, std::shared_ptr logger, const HttpConfig &http_config, const PrometheusConfig &prom_config); ~CoreServer(); void start(const std::string &host, int port); diff --git a/src/GeoDB.cpp b/src/GeoDB.cpp index f898e0abc..101435ad8 100644 --- a/src/GeoDB.cpp +++ b/src/GeoDB.cpp @@ -8,23 +8,24 @@ namespace visor::geo { -std::ostream &operator<<(std::ostream &os, const City &c) -{ - return os << c.location; -} - MaxmindDB &GeoIP() { - static 
MaxmindDB ip_db(MaxmindDB::Type::Geo); + static MaxmindDB ip_db; return ip_db; } MaxmindDB &GeoASN() { - static MaxmindDB asn_db(MaxmindDB::Type::Asn); + static MaxmindDB asn_db; return asn_db; } +std::ostream &operator<<(std::ostream &os, const visor::geo::City &c) +{ + os << c.location; + return os; +} + void MaxmindDB::enable(const std::string &database_filename, int cache_size) { auto status = MMDB_open(database_filename.c_str(), MMDB_MODE_MMAP, &_mmdb); @@ -33,11 +34,7 @@ void MaxmindDB::enable(const std::string &database_filename, int cache_size) throw std::runtime_error(msg); } if (cache_size != 0) { - if (_type == Type::Geo) { - _lru_geo_cache = std::make_unique>(cache_size); - } else if (_type == Type::Asn) { - _lru_asn_cache = std::make_unique>(cache_size); - } + _lru_cache = std::make_unique>>(cache_size); } _enabled = true; } @@ -53,7 +50,7 @@ City MaxmindDB::getGeoLoc(const struct sockaddr *sa) const { if (!_enabled) { - return {}; + return City(); } int mmdb_error; @@ -70,15 +67,15 @@ City MaxmindDB::getGeoLoc(const struct sockaddr_in *sa4) const { if (!_enabled) { - return {}; + return City(); } std::string ip_address; - if (_lru_geo_cache) { + if (_lru_cache) { ip_address = fmt::format_int(sa4->sin_addr.s_addr).str(); std::shared_lock lock(_cache_mutex); - if (auto geoloc = _lru_geo_cache->getValue(ip_address); geoloc.has_value()) { - return geoloc.value(); + if (auto geoloc = _lru_cache->getValue(ip_address); geoloc.has_value()) { + return std::get(geoloc.value()); } } @@ -86,17 +83,17 @@ City MaxmindDB::getGeoLoc(const struct sockaddr_in *sa4) const MMDB_lookup_result_s lookup = MMDB_lookup_sockaddr(&_mmdb, reinterpret_cast(sa4), &mmdb_error); if (mmdb_error != MMDB_SUCCESS || !lookup.found_entry) { - if (_lru_geo_cache) { + if (_lru_cache) { std::unique_lock lock(_cache_mutex); - _lru_geo_cache->put(ip_address, City{"Unknown", std::string(), std::string()}); + _lru_cache->put(ip_address, City{"Unknown", std::string(), std::string()}); } return 
City{"Unknown", std::string(), std::string()}; } - if (_lru_geo_cache) { + if (_lru_cache) { auto geoloc = _getGeoLoc(&lookup); std::unique_lock lock(_cache_mutex); - _lru_geo_cache->put(ip_address, geoloc); + _lru_cache->put(ip_address, geoloc); return geoloc; } @@ -107,15 +104,15 @@ City MaxmindDB::getGeoLoc(const struct sockaddr_in6 *sa6) const { if (!_enabled) { - return {}; + return City(); } std::string ip_address; - if (_lru_geo_cache) { + if (_lru_cache) { ip_address = fmt::format("{}", sa6->sin6_addr.s6_addr); std::shared_lock lock(_cache_mutex); - if (auto geoloc = _lru_geo_cache->getValue(ip_address); geoloc.has_value()) { - return geoloc.value(); + if (auto geoloc = _lru_cache->getValue(ip_address); geoloc.has_value()) { + return std::get(geoloc.value()); } } @@ -123,17 +120,17 @@ City MaxmindDB::getGeoLoc(const struct sockaddr_in6 *sa6) const MMDB_lookup_result_s lookup = MMDB_lookup_sockaddr(&_mmdb, reinterpret_cast(sa6), &mmdb_error); if (mmdb_error != MMDB_SUCCESS || !lookup.found_entry) { - if (_lru_geo_cache) { + if (_lru_cache) { std::unique_lock lock(_cache_mutex); - _lru_geo_cache->put(ip_address, City{"Unknown", std::string(), std::string()}); + _lru_cache->put(ip_address, City{"Unknown", std::string(), std::string()}); } return City{"Unknown", std::string(), std::string()}; } - if (_lru_geo_cache) { + if (_lru_cache) { auto geoloc = _getGeoLoc(&lookup); std::unique_lock lock(_cache_mutex); - _lru_geo_cache->put(ip_address, geoloc); + _lru_cache->put(ip_address, geoloc); return geoloc; } @@ -144,13 +141,13 @@ City MaxmindDB::getGeoLoc(const char *ip_address) const { if (!_enabled) { - return {}; + return City(); } - if (_lru_geo_cache) { + if (_lru_cache) { std::shared_lock lock(_cache_mutex); - if (auto geoloc = _lru_geo_cache->getValue(ip_address); geoloc.has_value()) { - return geoloc.value(); + if (auto geoloc = _lru_cache->getValue(ip_address); geoloc.has_value()) { + return std::get(geoloc.value()); } } @@ -158,17 +155,17 @@ City 
MaxmindDB::getGeoLoc(const char *ip_address) const MMDB_lookup_result_s lookup = MMDB_lookup_string(&_mmdb, ip_address, &gai_error, &mmdb_error); if (0 != gai_error || MMDB_SUCCESS != mmdb_error || !lookup.found_entry) { - if (_lru_geo_cache) { + if (_lru_cache) { std::unique_lock lock(_cache_mutex); - _lru_geo_cache->put(ip_address, City{"Unknown", std::string(), std::string()}); + _lru_cache->put(ip_address, City{"Unknown", std::string(), std::string()}); } return City{"Unknown", std::string(), std::string()}; } - if (_lru_geo_cache) { + if (_lru_cache) { auto geoloc = _getGeoLoc(&lookup); std::unique_lock lock(_cache_mutex); - _lru_geo_cache->put(ip_address, geoloc); + _lru_cache->put(ip_address, geoloc); return geoloc; } @@ -246,7 +243,7 @@ std::string MaxmindDB::getASNString(const struct sockaddr *sa) const { if (!_enabled) { - return {}; + return ""; } int mmdb_error; @@ -263,14 +260,14 @@ std::string MaxmindDB::getASNString(const struct sockaddr_in *sa4) const { if (!_enabled) { - return {}; + return ""; } std::string ip_address; - if (_lru_asn_cache) { + if (_lru_cache) { ip_address = fmt::format_int(sa4->sin_addr.s_addr).str(); std::shared_lock lock(_cache_mutex); - if (auto asn = _lru_asn_cache->getValue(ip_address); asn.has_value()) { - return asn.value(); + if (auto asn = _lru_cache->getValue(ip_address); asn.has_value()) { + return std::get(asn.value()); } } @@ -278,17 +275,17 @@ std::string MaxmindDB::getASNString(const struct sockaddr_in *sa4) const MMDB_lookup_result_s lookup = MMDB_lookup_sockaddr(&_mmdb, reinterpret_cast(sa4), &mmdb_error); if (mmdb_error != MMDB_SUCCESS || !lookup.found_entry) { - if (_lru_asn_cache) { + if (_lru_cache) { std::unique_lock lock(_cache_mutex); - _lru_asn_cache->put(ip_address, "Unknown"); + _lru_cache->put(ip_address, "Unknown"); } return "Unknown"; } - if (_lru_asn_cache) { + if (_lru_cache) { auto asn = _getASNString(&lookup); std::unique_lock lock(_cache_mutex); - _lru_asn_cache->put(ip_address, asn); + 
_lru_cache->put(ip_address, asn); return asn; } @@ -299,15 +296,15 @@ std::string MaxmindDB::getASNString(const struct sockaddr_in6 *sa6) const { if (!_enabled) { - return {}; + return ""; } std::string ip_address; - if (_lru_asn_cache) { + if (_lru_cache) { ip_address = fmt::format("{}", sa6->sin6_addr.s6_addr); std::shared_lock lock(_cache_mutex); - if (auto asn = _lru_asn_cache->getValue(ip_address); asn.has_value()) { - return asn.value(); + if (auto asn = _lru_cache->getValue(ip_address); asn.has_value()) { + return std::get(asn.value()); } } @@ -315,17 +312,17 @@ std::string MaxmindDB::getASNString(const struct sockaddr_in6 *sa6) const MMDB_lookup_result_s lookup = MMDB_lookup_sockaddr(&_mmdb, reinterpret_cast(sa6), &mmdb_error); if (mmdb_error != MMDB_SUCCESS || !lookup.found_entry) { - if (_lru_asn_cache) { + if (_lru_cache) { std::unique_lock lock(_cache_mutex); - _lru_asn_cache->put(ip_address, "Unknown"); + _lru_cache->put(ip_address, "Unknown"); } return "Unknown"; } - if (_lru_asn_cache) { + if (_lru_cache) { auto asn = _getASNString(&lookup); std::unique_lock lock(_cache_mutex); - _lru_asn_cache->put(ip_address, asn); + _lru_cache->put(ip_address, asn); return asn; } @@ -336,13 +333,13 @@ std::string MaxmindDB::getASNString(const char *ip_address) const { if (!_enabled) { - return {}; + return ""; } - if (_lru_asn_cache) { + if (_lru_cache) { std::shared_lock lock(_cache_mutex); - if (auto asn = _lru_asn_cache->getValue(ip_address); asn.has_value()) { - return asn.value(); + if (auto asn = _lru_cache->getValue(ip_address); asn.has_value()) { + return std::get(asn.value()); } } @@ -350,17 +347,17 @@ std::string MaxmindDB::getASNString(const char *ip_address) const MMDB_lookup_result_s lookup = MMDB_lookup_string(&_mmdb, ip_address, &gai_error, &mmdb_error); if (0 != gai_error || MMDB_SUCCESS != mmdb_error || !lookup.found_entry) { - if (_lru_asn_cache) { + if (_lru_cache) { std::unique_lock lock(_cache_mutex); - _lru_asn_cache->put(ip_address, 
"Unknown"); + _lru_cache->put(ip_address, "Unknown"); } return "Unknown"; } - if (_lru_asn_cache) { + if (_lru_cache) { auto asn = _getASNString(&lookup); std::unique_lock lock(_cache_mutex); - _lru_asn_cache->put(ip_address, asn); + _lru_cache->put(ip_address, asn); return asn; } diff --git a/src/GeoDB.h b/src/GeoDB.h index 672f3dc67..8000670b5 100644 --- a/src/GeoDB.h +++ b/src/GeoDB.h @@ -8,8 +8,8 @@ #pragma GCC diagnostic ignored "-Wpedantic" #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif -#include #include +#include #ifdef __GNUC__ #pragma GCC diagnostic pop #endif @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "VisorLRUList.h" @@ -33,28 +33,19 @@ struct City { bool operator==(const City &other) const { - return std::tie(location, latitude, longitude) == std::tie(other.location, other.latitude, other.longitude); - } - bool operator!=(const City &other) const - { - return !(*this == other); + return (location == other.location + && latitude == other.latitude + && longitude == other.longitude); } -}; -std::ostream &operator<<(std::ostream &os, const visor::geo::City &c); + friend std::ostream &operator<<(std::ostream &os, const City &c); +}; class MaxmindDB { static constexpr size_t DEFAULT_CACHE_SIZE = 10000; public: - enum class Type { - Asn, - Geo - }; - - MaxmindDB(Type type) - : _type(type){}; ~MaxmindDB(); void enable(const std::string &database_filename, int cache_size = DEFAULT_CACHE_SIZE); @@ -77,11 +68,9 @@ class MaxmindDB std::string getASNString(const struct sockaddr_in6 *sa6) const; private: - Type _type; mutable MMDB_s _mmdb; bool _enabled = false; - std::unique_ptr> _lru_geo_cache; - std::unique_ptr> _lru_asn_cache; + std::unique_ptr>> _lru_cache; mutable std::shared_mutex _cache_mutex; City _getGeoLoc(MMDB_lookup_result_s *lookup) const; diff --git a/src/HttpServer.h b/src/HttpServer.h index 1394642b0..8a0a62a2c 100644 --- a/src/HttpServer.h +++ b/src/HttpServer.h @@ -10,11 +10,6 @@ #pragma GCC 
diagnostic ignored "-Wold-style-cast" #pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant" #endif -#ifdef __APPLE__ -#ifndef NI_MAXHOST -#define NI_MAXHOST 1025 -#endif -#endif #include #ifdef __GNUC__ #pragma GCC diagnostic pop diff --git a/src/IpPort.cpp b/src/IpPort.cpp index ae66cc4db..e8f70b787 100644 --- a/src/IpPort.cpp +++ b/src/IpPort.cpp @@ -11,26 +11,6 @@ std::ostream &operator<<(std::ostream &os, const IpPort &p) std::map IpPort::ports_tcp_list; std::map IpPort::ports_udp_list; -std::string IpPort::get_service(uint16_t port, Protocol proto) -{ - std::map::iterator it; - if (proto == Protocol::TCP) { - it = ports_tcp_list.lower_bound(port); - if (it == ports_tcp_list.end()) { - return std::to_string(port); - } - } else if (proto == Protocol::UDP) { - it = ports_udp_list.lower_bound(port); - if (it == ports_udp_list.end()) { - return std::to_string(port); - } - } - if ((it->first == port) || (port >= it->second.lower_bound)) { - return it->second.name; - } - return std::to_string(port); -} - void IpPort::set_csv_iana_ports(std::string path) { io::CSVReader<3> in(path); diff --git a/src/IpPort.h b/src/IpPort.h index bedd177e1..56c4a9cfb 100644 --- a/src/IpPort.h +++ b/src/IpPort.h @@ -39,7 +39,6 @@ struct IpPort { std::string get_service() const; - static std::string get_service(uint16_t port, Protocol proto); static void set_csv_iana_ports(std::string path); bool operator==(const IpPort &other) const diff --git a/src/Metrics.cpp b/src/Metrics.cpp index 149423d62..fb15486d7 100644 --- a/src/Metrics.cpp +++ b/src/Metrics.cpp @@ -19,22 +19,6 @@ void Counter::to_prometheus(std::stringstream &out, Metric::LabelMap add_labels) out << name_snake({}, add_labels) << ' ' << _value << std::endl; } -void Counter::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, Metric::LabelMap add_labels) const -{ - auto metric = scope.add_metrics(); - metric->set_name(base_name_snake()); - metric->set_description(_desc); - auto 
gauge_data_point = metric->mutable_gauge()->add_data_points(); - gauge_data_point->set_as_int(_value); - gauge_data_point->set_start_time_unix_nano(timespec_to_uint64(start)); - gauge_data_point->set_time_unix_nano(timespec_to_uint64(end)); - for (const auto &label: add_labels) { - auto attribute = gauge_data_point->add_attributes(); - attribute->set_key(label.first); - attribute->mutable_value()->set_string_value(label.second); - } -} - void Rate::to_json(json &j, bool include_live) const { to_json(j); @@ -55,12 +39,6 @@ void Rate::to_prometheus(std::stringstream &out, Metric::LabelMap add_labels) co _quantile.to_prometheus(out, add_labels); } -void Rate::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, Metric::LabelMap add_labels) const -{ - std::shared_lock lock(_sketch_mutex); - _quantile.to_opentelemetry(scope, start, end, add_labels); -} - void Cardinality::merge(const Cardinality &other) { datasketches::cpc_union merge_set; @@ -79,22 +57,6 @@ void Cardinality::to_prometheus(std::stringstream &out, Metric::LabelMap add_lab out << name_snake({}, add_labels) << ' ' << lround(_set.get_estimate()) << std::endl; } -void Cardinality::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, Metric::LabelMap add_labels) const -{ - auto metric = scope.add_metrics(); - metric->set_name(base_name_snake()); - metric->set_description(_desc); - auto gauge_data_point = metric->mutable_gauge()->add_data_points(); - gauge_data_point->set_as_int(lround(_set.get_estimate())); - gauge_data_point->set_start_time_unix_nano(timespec_to_uint64(start)); - gauge_data_point->set_time_unix_nano(timespec_to_uint64(end)); - for (const auto &label: add_labels) { - auto attribute = gauge_data_point->add_attributes(); - attribute->set_key(label.first); - attribute->mutable_value()->set_string_value(label.second); - } -} - // static storage for base labels Metric::LabelMap Metric::_static_labels; diff --git a/src/Metrics.h 
b/src/Metrics.h index 8bb21d4ec..99e7f281f 100644 --- a/src/Metrics.h +++ b/src/Metrics.h @@ -4,7 +4,6 @@ #pragma once #include -#include #include #include #ifdef __GNUC__ @@ -21,21 +20,14 @@ #pragma GCC diagnostic pop #endif #include -#include #include #include #include #include -#define HIST_MIN_EXP -9 -#define HIST_MAX_EXP 18 -#define HIST_LOG_BUCK 18 -#define HIST_N_BUCKETS (HIST_LOG_BUCK * (HIST_MAX_EXP - HIST_MIN_EXP)) - namespace visor { using json = nlohmann::json; -using namespace opentelemetry::proto; using namespace std::chrono; struct comparator { @@ -50,11 +42,6 @@ struct comparator { } }; -static inline uint64_t timespec_to_uint64(timespec &stamp) -{ - return stamp.tv_sec * 1000000000ULL + stamp.tv_nsec; -} - class Metric { public: @@ -62,7 +49,8 @@ class Metric enum class Aggregate { DEFAULT, - SUM + SUM, + SUMMARY }; private: @@ -101,7 +89,7 @@ class Metric virtual ~Metric() = default; - virtual void set_info(std::string schema_key, std::initializer_list names, const std::string &desc) + void set_info(std::string schema_key, std::initializer_list names, const std::string &desc) { _name.clear(); _name = names; @@ -123,7 +111,6 @@ class Metric virtual void to_json(json &j) const = 0; virtual void to_prometheus(std::stringstream &out, LabelMap add_labels = {}) const = 0; - virtual void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, LabelMap add_labels = {}) const = 0; }; /** @@ -169,7 +156,6 @@ class Counter final : public Metric // Metric void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, LabelMap add_labels = {}) const override; }; /** @@ -181,36 +167,18 @@ template class Histogram final : public Metric { static_assert(std::is_integral::value || std::is_floating_point::value); - - static constexpr T _get_pace() - { - if constexpr 
(std::is_integral::value) { - return 1; - } else { - return 0.0000001; - } - } - - // calculated at compile time - static constexpr std::pair, size_t> _get_boundaries() - { - auto pace = _get_pace(); - std::array boundaries{}; - size_t index = 0; - for (auto exponent = HIST_MIN_EXP; exponent < HIST_MAX_EXP; exponent++) { - for (auto buckets = 0; buckets < HIST_LOG_BUCK; buckets++) { - boundaries[index++] = static_cast((std::pow(10.0, static_cast(buckets) / HIST_LOG_BUCK) * std::pow(10.0, exponent)) + pace); - } - } - auto itr = std::unique(boundaries.begin(), boundaries.end()); - return {boundaries, std::distance(boundaries.begin(), itr)}; - } + T _pace; datasketches::kll_sketch _sketch; public: Histogram(std::string schema_key, std::initializer_list names, std::string desc) : Metric(schema_key, names, std::move(desc)) { + if constexpr (std::is_integral::value) { + _pace = 1; + } else { + _pace = 0.0000001; + } } void update(const T &value) @@ -249,20 +217,22 @@ class Histogram final : public Metric if (_sketch.is_empty()) { return; } - auto bins_pmf = _get_boundaries(); - auto histogram_pmf = _sketch.get_PMF(bins_pmf.first.data(), bins_pmf.second); - std::vector bins; - for (size_t i = 0; i < bins_pmf.second; ++i) { - if (histogram_pmf[i]) { - bins.push_back(bins_pmf.first[i]); + std::size_t split_point_size = 1 + static_cast(std::log2(_sketch.get_n())); // Sturge’s Rule + T step = _sketch.get_max_value() / split_point_size; + T split_point = step; + std::unique_ptr bins = std::make_unique(split_point_size); + for (std::size_t i = 0; i < split_point_size; ++i) { + bins[i] = split_point + _pace; + split_point += step; + if (split_point > _sketch.get_max_value()) { + split_point = _sketch.get_max_value(); } } - auto histogram = _sketch.get_CDF(bins.data(), bins.size()); - auto pace = _get_pace(); - for (std::size_t i = 0; i < bins.size(); ++i) { - name_json_assign(j, {"buckets", std::to_string(bins[i] - pace)}, histogram[i] * _sketch.get_n()); + auto histogram = 
_sketch.get_CDF(bins.get(), split_point_size); + for (std::size_t i = 0; i < split_point_size; ++i) { + name_json_assign(j, {"buckets", std::to_string(bins[i] - _pace)}, histogram[i] * _sketch.get_n()); } - name_json_assign(j, {"buckets", "+Inf"}, histogram[bins.size()] * _sketch.get_n()); + name_json_assign(j, {"buckets", "+Inf"}, histogram[split_point_size] * _sketch.get_n()); } void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override @@ -270,66 +240,31 @@ class Histogram final : public Metric if (_sketch.is_empty()) { return; } - auto bins_pmf = _get_boundaries(); - auto histogram_pmf = _sketch.get_PMF(bins_pmf.first.data(), bins_pmf.second); - std::vector bins; - for (size_t i = 0; i < bins_pmf.second; ++i) { - if (histogram_pmf[i]) { - bins.push_back(bins_pmf.first[i]); + std::size_t split_point_size = 1 + static_cast(std::log2(_sketch.get_n())); // Sturge’s Rule + T step = _sketch.get_max_value() / split_point_size; + T split_point = step; + std::unique_ptr bins = std::make_unique(split_point_size); + for (std::size_t i = 0; i < split_point_size; ++i) { + bins[i] = split_point + _pace; + split_point += step; + if (split_point > _sketch.get_max_value()) { + split_point = _sketch.get_max_value(); } } - auto histogram = _sketch.get_CDF(bins.data(), bins.size()); - auto pace = _get_pace(); + auto histogram = _sketch.get_CDF(bins.get(), split_point_size); + out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; out << "# TYPE " << base_name_snake() << " histogram" << std::endl; - for (std::size_t i = 0; i < bins.size(); ++i) { + for (std::size_t i = 0; i < split_point_size; ++i) { LabelMap le(add_labels); - le["le"] = std::to_string(bins[i] - pace); + le["le"] = std::to_string(bins[i] - _pace); out << name_snake({"bucket"}, le) << ' ' << histogram[i] * _sketch.get_n() << std::endl; } LabelMap le(add_labels); le["le"] = "+Inf"; - out << name_snake({"bucket"}, le) << ' ' << histogram[bins.size()] * _sketch.get_n() << 
std::endl; + out << name_snake({"bucket"}, le) << ' ' << histogram[split_point_size] * _sketch.get_n() << std::endl; out << name_snake({"count"}, add_labels) << ' ' << _sketch.get_n() << std::endl; } - - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, LabelMap add_labels = {}) const - { - if (_sketch.is_empty()) { - return; - } - auto bins_pmf = _get_boundaries(); - auto histogram_pmf = _sketch.get_PMF(bins_pmf.first.data(), bins_pmf.second); - std::vector bins; - for (size_t i = 0; i < bins_pmf.second; ++i) { - if (histogram_pmf[i]) { - bins.push_back(bins_pmf.first[i]); - } - } - auto histogram = _sketch.get_CDF(bins.data(), bins.size()); - auto pace = _get_pace(); - - auto metric = scope.add_metrics(); - metric->set_name(base_name_snake()); - metric->set_description(_desc); - auto m_hist = metric->mutable_histogram(); - m_hist->set_aggregation_temporality(metrics::v1::AggregationTemporality::AGGREGATION_TEMPORALITY_CUMULATIVE); - auto hist_data_point = m_hist->add_data_points(); - hist_data_point->set_start_time_unix_nano(timespec_to_uint64(start)); - hist_data_point->set_time_unix_nano(timespec_to_uint64(end)); - - for (std::size_t i = 0; i < bins.size(); ++i) { - hist_data_point->add_explicit_bounds(bins[i] - pace); - hist_data_point->add_bucket_counts(histogram[i] * _sketch.get_n()); - } - hist_data_point->set_count(_sketch.get_n()); - - for (const auto &label : add_labels) { - auto attribute = hist_data_point->add_attributes(); - attribute->set_key(label.first); - attribute->mutable_value()->set_string_value(label.second); - } - } }; /** @@ -419,10 +354,6 @@ class Quantile final : public Metric void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override { - if (_quantile.is_empty()) { - return; - } - std::vector quantiles; if (_quantiles_sum.empty()) { const double fractions[4]{0.50, 0.90, 0.95, 0.99}; @@ -451,38 +382,6 @@ class Quantile final : public Metric out << name_snake({"count"}, 
add_labels) << ' ' << _quantile.get_n() << std::endl; } } - - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, LabelMap add_labels = {}) const override - { - if (_quantile.is_empty()) { - return; - } - - std::vector quantiles; - const double fractions[4]{0.50, 0.90, 0.95, 0.99}; - if (_quantiles_sum.empty()) { - quantiles = _quantile.get_quantiles(fractions, 4); - } else { - quantiles = _quantiles_sum; - } - - auto metric = scope.add_metrics(); - metric->set_name(base_name_snake()); - metric->set_description(_desc); - auto summary_data_point = metric->mutable_summary()->add_data_points(); - summary_data_point->set_start_time_unix_nano(timespec_to_uint64(start)); - summary_data_point->set_time_unix_nano(timespec_to_uint64(end)); - for (auto it = quantiles.begin(); it != quantiles.end(); ++it) { - auto quantile = summary_data_point->add_quantile_values(); - quantile->set_quantile(fractions[it - quantiles.begin()]); - quantile->set_value(*it); - } - for (const auto &label : add_labels) { - auto attribute = summary_data_point->add_attributes(); - attribute->set_key(label.first); - attribute->mutable_value()->set_string_value(label.second); - } - } }; /** @@ -525,16 +424,20 @@ class TopN final : public Metric return quantile.get_quantile(_percentile_threshold); } - void _set_opentelemetry_data(opentelemetry::proto::metrics::v1::NumberDataPoint *data_point, uint64_t start, uint64_t end, const Metric::LabelMap &l, uint64_t value) const + auto _get_summarized_data(const std::vector::row> &items, std::function formatter, uint64_t threshold) const { - data_point->set_as_int(value); - data_point->set_start_time_unix_nano(start); - data_point->set_time_unix_nano(end); - for (const auto &label : l) { - auto attribute = data_point->add_attributes(); - attribute->set_key(label.first); - attribute->mutable_value()->set_string_value(label.second); + std::map summary; + for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { + if 
(items[i].get_estimate() >= threshold) { + auto [removed, not_exists] = summary.emplace(formatter(items[i].get_item()), items[i].get_estimate()); + if (!not_exists) { + summary[removed->first] += items[i].get_estimate(); + } + } else { + break; + } } + return std::set, comparator>(summary.begin(), summary.end()); } public: @@ -584,17 +487,27 @@ class TopN final : public Metric * @param j json object * @param formatter std::function which takes a T as input (the type store it in top table) it needs to return a std::string */ - void to_json(json &j, std::function formatter) const + void to_json(json &j, std::function formatter, Aggregate op = Aggregate::DEFAULT) const { auto section = json::array(); auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); auto threshold = _get_threshold(items); - for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { - if (items[i].get_estimate() >= threshold) { - section[i]["name"] = formatter(items[i].get_item()); - section[i]["estimate"] = items[i].get_estimate(); - } else { - break; + if (op == Aggregate::DEFAULT) { + for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { + if (items[i].get_estimate() >= threshold) { + section[i]["name"] = formatter(items[i].get_item()); + section[i]["estimate"] = items[i].get_estimate(); + } else { + break; + } + } + } else if (op == Aggregate::SUMMARY) { + auto sorted = _get_summarized_data(items, formatter, threshold); + uint64_t i = 0; + for (const auto &data : sorted) { + section[i]["name"] = data.first; + section[i]["estimate"] = data.second; + i++; } } name_json_assign(j, section); @@ -616,33 +529,35 @@ class TopN final : public Metric name_json_assign(j, section); } - void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels, std::function formatter) const + void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels, std::function formatter, Aggregate op = Aggregate::DEFAULT) const { - auto items = 
_fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - if (!std::min(_top_count, items.size())) { - return; - } LabelMap l(add_labels); + auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); auto threshold = _get_threshold(items); out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; out << "# TYPE " << base_name_snake() << " gauge" << std::endl; - for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { - if (items[i].get_estimate() >= threshold) { - l[_item_key] = formatter(items[i].get_item()); - out << name_snake({}, l) << ' ' << items[i].get_estimate() << std::endl; - } else { - break; + if (op == Aggregate::DEFAULT) { + for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { + if (items[i].get_estimate() >= threshold) { + l[_item_key] = formatter(items[i].get_item()); + out << name_snake({}, l) << ' ' << items[i].get_estimate() << std::endl; + } else { + break; + } + } + } else if (op == Aggregate::SUMMARY) { + auto sorted = _get_summarized_data(items, formatter, threshold); + for (const auto &data : sorted) { + l[_item_key] = data.first; + out << name_snake({}, l) << ' ' << data.second << std::endl; } } } void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels, std::function formatter) const { - auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - if (!std::min(_top_count, items.size())) { - return; - } LabelMap l(add_labels); + auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); auto threshold = _get_threshold(items); out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; out << "# TYPE " << base_name_snake() << " gauge" << std::endl; @@ -675,11 +590,8 @@ class TopN final : public Metric void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override { - auto items = 
_fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - if (!std::min(_top_count, items.size())) { - return; - } LabelMap l(add_labels); + auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); auto threshold = _get_threshold(items); out << "# HELP " << base_name_snake() << ' ' << _desc << std::endl; out << "# TYPE " << base_name_snake() << " gauge" << std::endl; @@ -694,77 +606,6 @@ class TopN final : public Metric } } } - - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, LabelMap add_labels = {}) const override - { - auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - if (!std::min(_top_count, items.size())) { - return; - } - LabelMap l(add_labels); - auto metric = scope.add_metrics(); - metric->set_name(base_name_snake()); - metric->set_description(_desc); - auto threshold = _get_threshold(items); - auto start_time = timespec_to_uint64(start); - auto end_time = timespec_to_uint64(end); - for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { - if (items[i].get_estimate() >= threshold) { - std::stringstream name_text; - name_text << items[i].get_item(); - l[_item_key] = name_text.str(); - _set_opentelemetry_data(metric->mutable_gauge()->add_data_points(), start_time, end_time, l, items[i].get_estimate()); - } else { - break; - } - } - } - - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, Metric::LabelMap add_labels, std::function formatter) const - { - auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - if (!std::min(_top_count, items.size())) { - return; - } - LabelMap l(add_labels); - auto metric = scope.add_metrics(); - metric->set_name(base_name_snake()); - metric->set_description(_desc); - auto threshold = _get_threshold(items); - auto start_time = timespec_to_uint64(start); - auto end_time = 
timespec_to_uint64(end); - for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { - if (items[i].get_estimate() >= threshold) { - l[_item_key] = formatter(items[i].get_item()); - _set_opentelemetry_data(metric->mutable_gauge()->add_data_points(), start_time, end_time, l, items[i].get_estimate()); - } else { - break; - } - } - } - - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, Metric::LabelMap add_labels, std::function formatter) const - { - auto items = _fi.get_frequent_items(datasketches::frequent_items_error_type::NO_FALSE_NEGATIVES); - if (!std::min(_top_count, items.size())) { - return; - } - LabelMap l(add_labels); - auto metric = scope.add_metrics(); - metric->set_name(base_name_snake()); - metric->set_description(_desc); - auto threshold = _get_threshold(items); - auto start_time = timespec_to_uint64(start); - auto end_time = timespec_to_uint64(end); - for (uint64_t i = 0; i < std::min(_top_count, items.size()); i++) { - if (items[i].get_estimate() >= threshold) { - formatter(l, _item_key, items[i].get_item()); - _set_opentelemetry_data(metric->mutable_gauge()->add_data_points(), start_time, end_time, l, items[i].get_estimate()); - } else { - break; - } - } - } }; /** @@ -804,7 +645,6 @@ class Cardinality final : public Metric // Metric void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, LabelMap add_labels = {}) const override; }; /** @@ -891,20 +731,9 @@ class Rate final : public Metric void to_json(json &j, bool include_live) const; - void set_info(std::string schema_key, std::initializer_list names, const std::string &desc) override - { - _name.clear(); - _name = names; - _desc = desc; - _schema_key = schema_key; - _check_names(); - _quantile.set_info(schema_key, names, desc); - } - // Metric void to_json(json &j) const override; - 
void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, LabelMap add_labels = {}) const override; }; + } \ No newline at end of file diff --git a/src/OpenTelemetry.h b/src/OpenTelemetry.h deleted file mode 100644 index 73e9e762d..000000000 --- a/src/OpenTelemetry.h +++ /dev/null @@ -1,71 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ - -#pragma once - -#include "HttpServer.h" -#include "opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" -#include -#include - -namespace visor { - -constexpr char BIN_CONTENT_TYPE[] = "application/x-protobuf"; - -using namespace opentelemetry::proto; - -struct OtelConfig { - bool enable{false}; - std::string endpoint{"localhost"}; - std::string path{"/v1/metrics"}; - uint32_t port_number{4318}; - uint64_t interval_sec{60}; - std::string tls_cert; - std::string tls_key; -}; - -class OpenTelemetry -{ - std::unique_ptr _client; - collector::metrics::v1::ExportMetricsServiceRequest _request; - metrics::v1::ResourceMetrics *_resource; - std::shared_ptr _timer_handle; - std::function _callback; - -public: - OpenTelemetry(const OtelConfig &config) - { - if (!config.tls_cert.empty() && !config.tls_key.empty()) { - _client = std::make_unique(config.endpoint, config.port_number, config.tls_cert, config.tls_key); - } else { - _client = std::make_unique(config.endpoint, config.port_number); - } - _resource = _request.add_resource_metrics(); - static timer timer_thread{std::chrono::seconds(config.interval_sec)}; - auto path = config.path; - _timer_handle = timer_thread.set_interval(std::chrono::seconds(config.interval_sec), [path, this] { - _resource->clear_scope_metrics(); - if (_callback && _callback(*_resource)) { - if (auto body_size = 
_request.ByteSizeLong(); body_size > sizeof(_request)) { - auto body = std::make_unique(body_size); - _request.SerializeToArray(body.get(), body_size); - auto result = _client->Post(path, body.get(), body_size, BIN_CONTENT_TYPE); - } - } - }); - } - - ~OpenTelemetry() - { - _timer_handle->cancel(); - _resource->clear_resource(); - _resource = nullptr; - } - - void OnInterval(std::function callback) - { - _callback = callback; - } -}; -} \ No newline at end of file diff --git a/src/Policies.cpp b/src/Policies.cpp index b292be7f2..b393be2d6 100644 --- a/src/Policies.cpp +++ b/src/Policies.cpp @@ -124,7 +124,6 @@ std::vector PolicyManager::load(const YAML::Node &policy_yaml, bool si handler_modules.back()->set_event_proxy(input_ptr->create_event_proxy(Configurable())); handler_module = handler_plugin->second->instantiate(handler_name, handler_modules.back()->get_event_proxy(), &handler_config.config, &handler_config.filter); } - handler_module->set_version(handler_config.version); policy_ptr->add_module(handler_module.get()); handler_modules.emplace_back(std::move(handler_module)); } @@ -398,25 +397,6 @@ void Policy::prometheus_metrics(std::stringstream &out) } } -void Policy::opentelemetry_metrics(metrics::v1::ScopeMetrics &scope) -{ - if (_merge_like_handlers) { - auto bucket_map = _get_merged_buckets(); - for (auto &[bucket, hmod] : bucket_map) { - hmod->window_opentelemetry(scope, bucket.get(), {{"policy", name()}, {"handler", hmod->schema_key() + "_merged"}}); - } - } else { - for (auto &mod : modules()) { - auto hmod = dynamic_cast(mod); - if (hmod) { - spdlog::stopwatch sw; - hmod->window_opentelemetry(scope, {{"policy", name()}, {"handler", hmod->name()}}); - spdlog::get("visor")->debug("{} window_opentelemetry elapsed time: {}", hmod->name(), sw); - } - } - } -} - Policy::BucketMap Policy::_get_merged_buckets(bool prometheus, uint64_t period, bool merged) { BucketMap bucket_map; @@ -429,8 +409,7 @@ Policy::BucketMap Policy::_get_merged_buckets(bool 
prometheus, uint64_t period, } for (auto &[bucket, handler] : bucket_map) { bool is_last = (bucket == std::prev(bucket_map.end())->first); - if (!is_last && (hmod->schema_key() != handler->schema_key()) - && (hmod->version() != handler->version())) { + if (hmod->schema_key() != handler->schema_key() && !is_last) { continue; } auto new_bucket = hmod->merge(bucket.get(), period, prometheus, merged); diff --git a/src/Policies.h b/src/Policies.h index b20fc0cf3..a2bb093dd 100644 --- a/src/Policies.h +++ b/src/Policies.h @@ -9,7 +9,6 @@ #include "Configurable.h" #include "HandlerModulePlugin.h" #include "InputModulePlugin.h" -#include "OpenTelemetry.h" #include "Taps.h" #include #include @@ -108,7 +107,6 @@ class Policy : public AbstractRunnableModule void json_metrics(json &j, uint64_t period, bool merge); void prometheus_metrics(std::stringstream &out); - void opentelemetry_metrics(metrics::v1::ScopeMetrics &scope); }; class PolicyManager : public AbstractManager diff --git a/src/StreamHandler.h b/src/StreamHandler.h index f74cc0c39..20441330e 100644 --- a/src/StreamHandler.h +++ b/src/StreamHandler.h @@ -6,7 +6,6 @@ #include "AbstractMetricsManager.h" #include "AbstractModule.h" -#include "CoreRegistry.h" #include "InputEventProxy.h" #include #include @@ -30,7 +29,6 @@ class StreamHandler : public AbstractRunnableModule { protected: std::unique_ptr _event_proxy; - std::string _version{CoreRegistry::DEFAULT_HANDLER_PLUGIN_VERSION}; public: StreamHandler(const std::string &name) @@ -58,22 +56,10 @@ class StreamHandler : public AbstractRunnableModule return _event_proxy.get(); } - void set_version(const std::string &version) - { - _version = version; - } - - const std::string &version() const - { - return _version; - } - virtual void window_json(json &j, uint64_t period, bool merged) = 0; virtual void window_json(json &j, AbstractMetricsBucket *bucket) = 0; virtual void window_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) = 0; virtual void 
window_prometheus(std::stringstream &out, AbstractMetricsBucket *bucket, Metric::LabelMap add_labels = {}) = 0; - virtual void window_opentelemetry(metrics::v1::ScopeMetrics &scope, Metric::LabelMap add_labels = {}) = 0; - virtual void window_opentelemetry(metrics::v1::ScopeMetrics &scope, AbstractMetricsBucket *bucket, Metric::LabelMap add_labels = {}) = 0; virtual std::unique_ptr merge(AbstractMetricsBucket *bucket, uint64_t period, bool prometheus, bool merged) = 0; }; @@ -237,20 +223,6 @@ class StreamMetricsHandler : public StreamHandler _metrics->window_external_prometheus(out, bucket, add_labels); }; - void window_opentelemetry(metrics::v1::ScopeMetrics &scope, Metric::LabelMap add_labels = {}) override - { - if (_metrics->current_periods() > 1) { - _metrics->window_single_opentelemetry(scope, 1, add_labels); - } else { - _metrics->window_single_opentelemetry(scope, 0, add_labels); - } - } - - void window_opentelemetry(metrics::v1::ScopeMetrics &scope, AbstractMetricsBucket *bucket, Metric::LabelMap add_labels = {}) override - { - _metrics->window_external_opentelemetry(scope, bucket, add_labels); - }; - void check_period_shift(timespec stamp) { _metrics->check_period_shift(stamp); diff --git a/src/VisorLRUList.h b/src/VisorLRUList.h index c6c04ccfb..c4d60e4ca 100644 --- a/src/VisorLRUList.h +++ b/src/VisorLRUList.h @@ -27,7 +27,7 @@ class LRUList { public: typedef typename std::list>::iterator ListIterator; - typedef typename robin_hood::unordered_node_map::iterator MapIterator; + typedef typename robin_hood::unordered_map::iterator MapIterator; /** * A c'tor for this class @@ -71,7 +71,7 @@ class LRUList if (m_MaxSize && m_CacheItemsMap.size() > m_MaxSize) { ListIterator lruIter = m_CacheItemsList.end(); - --lruIter; + lruIter--; if (deletedValue != nullptr) #if __cplusplus > 199711L || _MSC_VER >= 1800 @@ -151,7 +151,7 @@ class LRUList private: std::list> m_CacheItemsList; - robin_hood::unordered_node_map m_CacheItemsMap; + robin_hood::unordered_map 
m_CacheItemsMap; size_t m_MaxSize; }; diff --git a/src/handlers/bgp/BgpStreamHandler.cpp b/src/handlers/bgp/BgpStreamHandler.cpp index 270c513e7..d163e3595 100644 --- a/src/handlers/bgp/BgpStreamHandler.cpp +++ b/src/handlers/bgp/BgpStreamHandler.cpp @@ -189,29 +189,6 @@ void BgpMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap ad _counters.filtered.to_prometheus(out, add_labels); } -void BgpMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels) const -{ - _rate_total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - - { - auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - - event_rate->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_events->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_samples->to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - std::shared_lock r_lock(_mutex); - - _counters.OPEN.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.UPDATE.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.NOTIFICATION.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.KEEPALIVE.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.ROUTEREFRESH.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.filtered.to_opentelemetry(scope, start_ts, end_ts, add_labels); -} - void BgpMetricsBucket::to_json(json &j) const { diff --git a/src/handlers/bgp/BgpStreamHandler.h b/src/handlers/bgp/BgpStreamHandler.h index 5e74788dc..64d99bd97 100644 --- a/src/handlers/bgp/BgpStreamHandler.h +++ b/src/handlers/bgp/BgpStreamHandler.h @@ -46,7 +46,7 @@ class BgpMetricsBucket final : public visor::AbstractMetricsBucket counters() : OPEN(BGP_SCHEMA, {"wire_packets", "open"}, "Total BGP packets with message type OPEN") - , UPDATE(BGP_SCHEMA, {"wire_packets", 
"update"}, "Total BGP packets with message type UPDATE") + , UPDATE(BGP_SCHEMA, {"wire_packets", "offer"}, "Total BGP packets with message type KEEPALIVE") , NOTIFICATION(BGP_SCHEMA, {"wire_packets", "notification"}, "Total BGP packets with message type NOTIFICATION") , KEEPALIVE(BGP_SCHEMA, {"wire_packets", "keepalive"}, "Total BGP packets with message type KEEPALIVE") , ROUTEREFRESH(BGP_SCHEMA, {"wire_packets", "routerefresh"}, "Total BGP packets with message type ROUTEREFRESH") @@ -78,7 +78,6 @@ class BgpMetricsBucket final : public visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const override; void update_topn_metrics(size_t, uint64_t) override { } diff --git a/src/handlers/bgp/tests/window-schema.json b/src/handlers/bgp/tests/window-schema.json index 52c2f008c..12dae0b69 100644 --- a/src/handlers/bgp/tests/window-schema.json +++ b/src/handlers/bgp/tests/window-schema.json @@ -18,7 +18,7 @@ "filtered": 0, "keepalive": 3, "notification": 0, - "update": 4, + "offer": 4, "open": 2, "routerefresh": 0, "total": 9 @@ -48,7 +48,7 @@ "filtered": 0, "keepalive": 3, "notification": 0, - "update": 4, + "offer": 4, "open": 2, "routerefresh": 0, "total": 9 @@ -113,7 +113,7 @@ "filtered": 0, "keepalive": 3, "notification": 0, - "update": 4, + "offer": 4, "open": 2, "routerefresh": 0, "total": 9 @@ -125,7 +125,7 @@ "filtered", "keepalive", "notification", - "update", + "offer", "open", "routerefresh", "total" @@ -181,10 +181,10 @@ 0 ] }, - "update": { - "$id": "#/properties/bgp/properties/wire_packets/properties/update", + "offer": { + "$id": "#/properties/bgp/properties/wire_packets/properties/offer", "type": "integer", - "title": "The 
update schema", + "title": "The offer schema", "description": "An explanation about the purpose of this instance.", "default": 0, "examples": [ diff --git a/src/handlers/dhcp/DhcpStreamHandler.cpp b/src/handlers/dhcp/DhcpStreamHandler.cpp index 76f9ad35e..09fa5c37e 100644 --- a/src/handlers/dhcp/DhcpStreamHandler.cpp +++ b/src/handlers/dhcp/DhcpStreamHandler.cpp @@ -50,14 +50,6 @@ void DhcpStreamHandler::start() _metrics->set_recorded_stream(); } - if (config_exists("xact_ttl_ms")) { - auto ttl = config_get("xact_ttl_ms"); - _metrics->set_xact_ttl(static_cast(ttl)); - } else if (config_exists("xact_ttl_secs")) { - auto ttl = config_get("xact_ttl_secs"); - _metrics->set_xact_ttl(static_cast(ttl) * 1000); - } - if (_pcap_proxy) { _pkt_udp_connection = _pcap_proxy->udp_signal.connect(&DhcpStreamHandler::process_udp_packet_cb, this); _start_tstamp_connection = _pcap_proxy->start_tstamp_signal.connect(&DhcpStreamHandler::set_start_tstamp, this); @@ -150,35 +142,6 @@ void DhcpMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap a _dhcp_topServers.to_prometheus(out, add_labels); } -void DhcpMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels) const -{ - _rate_total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - - { - auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - - event_rate->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_events->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_samples->to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - std::shared_lock r_lock(_mutex); - - _counters.DISCOVER.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.OFFER.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.REQUEST.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.ACK.to_opentelemetry(scope, start_ts, end_ts, add_labels); - 
_counters.SOLICIT.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.ADVERTISE.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.REQUESTV6.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.REPLY.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.filtered.to_opentelemetry(scope, start_ts, end_ts, add_labels); - - _dhcp_topClients.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dhcp_topServers.to_opentelemetry(scope, start_ts, end_ts, add_labels); -} - void DhcpMetricsBucket::to_json(json &j) const { @@ -328,9 +291,9 @@ void DhcpMetricsManager::process_dhcp_layer(pcpp::DhcpLayer *dhcp, pcpp::Packet hostname = option.getValueAsString(); } auto mac_address = dhcp->getClientHardwareAddress().toString(); - _request_ack_manager->start_transaction(dhcp->getDhcpHeader()->transactionID, {{stamp, {0, 0}}, hostname, mac_address}); + _request_ack_manager.start_transaction(dhcp->getDhcpHeader()->transactionID, {{stamp, {0, 0}}, hostname, mac_address}); } else if (type == pcpp::DHCP_ACK) { - auto xact = _request_ack_manager->maybe_end_transaction(dhcp->getDhcpHeader()->transactionID, stamp); + auto xact = _request_ack_manager.maybe_end_transaction(dhcp->getDhcpHeader()->transactionID, stamp); if (xact.first == Result::Valid) { live_bucket()->new_dhcp_transaction(_deep_sampling_now, dhcp, xact.second); } diff --git a/src/handlers/dhcp/DhcpStreamHandler.h b/src/handlers/dhcp/DhcpStreamHandler.h index 8c4fb4faf..1701259a8 100644 --- a/src/handlers/dhcp/DhcpStreamHandler.h +++ b/src/handlers/dhcp/DhcpStreamHandler.h @@ -100,7 +100,6 @@ class DhcpMetricsBucket final : public visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void 
to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const override; void update_topn_metrics(size_t, uint64_t) override { } @@ -119,25 +118,18 @@ class DhcpMetricsBucket final : public visor::AbstractMetricsBucket class DhcpMetricsManager final : public visor::AbstractMetricsManager { - typedef TransactionManager> DhcpTransactionManager; - std::unique_ptr _request_ack_manager; + TransactionManager> _request_ack_manager; public: DhcpMetricsManager(const Configurable *window_config) : visor::AbstractMetricsManager(window_config) - , _request_ack_manager(std::make_unique()) { } void on_period_shift(timespec stamp, [[maybe_unused]] const DhcpMetricsBucket *maybe_expiring_bucket) override { // Dhcp transaction support - _request_ack_manager->purge_old_transactions(stamp); - } - - void set_xact_ttl(uint32_t ttl) - { - _request_ack_manager = std::make_unique(ttl); + _request_ack_manager.purge_old_transactions(stamp); } void process_filtered(timespec stamp); @@ -157,9 +149,7 @@ class DhcpStreamHandler final : public visor::StreamMetricsHandler -#include #include #ifdef __GNUC__ #pragma GCC diagnostic pop @@ -52,7 +51,6 @@ void DnsStreamHandler::start() // default enabled groups _groups.set(group::DnsMetrics::Cardinality); _groups.set(group::DnsMetrics::Counters); - _groups.set(group::DnsMetrics::Quantiles); _groups.set(group::DnsMetrics::DnsTransactions); _groups.set(group::DnsMetrics::TopQnames); _groups.set(group::DnsMetrics::TopPorts); @@ -61,50 +59,21 @@ void DnsStreamHandler::start() // Setup Filters if (config_exists("exclude_noerror") && config_get("exclude_noerror")) { _f_enabled.set(Filters::ExcludingRCode); - _f_rcodes.push_back(NoError); + _f_rcode = NoError; } else if (config_exists("only_rcode")) { - std::vector rcodes; uint64_t want_code; try { want_code = config_get("only_rcode"); } catch (const std::exception &e) { - try { - rcodes = config_get("only_rcode"); - } catch (const 
std::exception &e) { - throw ConfigException("DnsStreamHandler: wrong value type for only_rcode filter. It should be an integer or an array"); - } + throw ConfigException("DnsStreamHandler: wrong value type for only_rcode filter. It should be an integer"); } - _f_enabled.set(Filters::OnlyRCode); - if (rcodes.empty()) { - if (RCodeNames.find(want_code) != RCodeNames.end()) { - _f_rcodes.push_back(static_cast(want_code)); - } else { - throw ConfigException("DnsStreamHandler: only_rcode filter contained an invalid/unsupported rcode"); - } - _register_predicate_filter(Filters::OnlyRCode, "only_rcode", std::to_string(want_code)); + if (RCodeNames.find(want_code) != RCodeNames.end()) { + _f_enabled.set(Filters::OnlyRCode); + _f_rcode = want_code; } else { - for (const auto &rcode : rcodes) { - if (std::all_of(rcode.begin(), rcode.end(), ::isdigit)) { - auto value = std::stoul(rcode); - if (RCodeNames.find(value) == RCodeNames.end()) { - throw ConfigException(fmt::format("DnsStreamHandler: only_rcode filter contained an invalid/unsupported rcode: {}", value)); - } - _f_rcodes.push_back(value); - _register_predicate_filter(Filters::OnlyRCode, "only_rcode", rcode); - } else { - std::string upper_rcode{rcode}; - std::transform(upper_rcode.begin(), upper_rcode.end(), upper_rcode.begin(), - [](unsigned char c) { return std::toupper(c); }); - if (RCodeNumbers.find(upper_rcode) != RCodeNumbers.end()) { - auto value = RCodeNumbers[upper_rcode]; - _f_rcodes.push_back(value); - _register_predicate_filter(Filters::OnlyRCode, "only_rcode", std::to_string(value)); - } else { - throw ConfigException(fmt::format("DnsStreamHandler: only_rcode filter contained an invalid/unsupported rcode: {}", rcode)); - } - } - } + throw ConfigException("DnsStreamHandler: only_rcode filter contained an invalid/unsupported rcode"); } + _register_predicate_filter(Filters::OnlyRCode, "only_rcode", std::to_string(_f_rcode)); } if (config_exists("only_queries") && config_get("only_queries")) { 
_f_enabled.set(Filters::OnlyQueries); @@ -144,23 +113,16 @@ void DnsStreamHandler::start() } } } - if (config_exists("only_qname")) { - _f_enabled.set(Filters::OnlyQName); - for (const auto &qname : config_get("only_qname")) { - std::string qname_ci{qname}; - std::transform(qname_ci.begin(), qname_ci.end(), qname_ci.begin(), - [](unsigned char c) { return std::tolower(c); }); - _f_qnames.emplace_back(qname_ci); - _register_predicate_filter(Filters::OnlyQName, "only_qname", qname_ci); - } - } if (config_exists("only_qname_suffix")) { _f_enabled.set(Filters::OnlyQNameSuffix); for (const auto &qname : config_get("only_qname_suffix")) { + // note, this currently copies the strings, meaning there could be a big list that is duplicated + // we can work on trying to make this a string_view instead + // we copy it out so that we don't have to hit the config mutex std::string qname_ci{qname}; std::transform(qname_ci.begin(), qname_ci.end(), qname_ci.begin(), [](unsigned char c) { return std::tolower(c); }); - _f_qnames_suffix.emplace_back(std::move(qname_ci)); + _f_qnames.emplace_back(std::move(qname_ci)); } } if (config_exists("geoloc_notfound") && config_get("geoloc_notfound")) { @@ -193,19 +155,10 @@ void DnsStreamHandler::start() _metrics->set_recorded_stream(); } - if (config_exists("xact_ttl_ms")) { - auto ttl = config_get("xact_ttl_ms"); - _metrics->set_xact_ttl(static_cast(ttl)); - } else if (config_exists("xact_ttl_secs")) { - auto ttl = config_get("xact_ttl_secs"); - _metrics->set_xact_ttl(static_cast(ttl) * 1000); - } - if (_pcap_proxy) { if (!_using_predicate_signals) { _pkt_udp_connection = _pcap_proxy->udp_signal.connect(&DnsStreamHandler::process_udp_packet_cb, this); } - _pkt_tcp_reassembled_connection = _pcap_proxy->tcp_reassembled_signal.connect(&DnsStreamHandler::process_tcp_reassembled_packet_cb, this); _start_tstamp_connection = _pcap_proxy->start_tstamp_signal.connect([this](timespec stamp) { set_start_tstamp(stamp); _event_proxy ? 
static_cast(_event_proxy.get())->start_tstamp_signal(stamp) : void(); @@ -240,7 +193,6 @@ void DnsStreamHandler::stop() if (_pcap_proxy) { _pkt_udp_connection.disconnect(); - _pkt_tcp_reassembled_connection.disconnect(); _start_tstamp_connection.disconnect(); _end_tstamp_connection.disconnect(); _tcp_start_connection.disconnect(); @@ -274,8 +226,8 @@ void DnsStreamHandler::process_udp_packet_cb(pcpp::Packet &payload, PacketDirect assert(udpLayer); uint16_t metric_port{0}; - auto dst_port = udpLayer->getDstPort(); - auto src_port = udpLayer->getSrcPort(); + auto dst_port = ntohs(udpLayer->getUdpHeader()->portDst); + auto src_port = ntohs(udpLayer->getUdpHeader()->portSrc); // note we want to capture metrics only when one of the ports is dns, // but metrics on the port which is _not_ the dns port if (DnsLayer::isDnsPort(dst_port)) { @@ -302,39 +254,6 @@ void DnsStreamHandler::process_udp_packet_cb(pcpp::Packet &payload, PacketDirect } } -void DnsStreamHandler::process_tcp_reassembled_packet_cb(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, uint32_t flowkey, timespec stamp) -{ - pcpp::TcpLayer *tcpLayer = payload.getLayerOfType(); - assert(tcpLayer); - - uint16_t metric_port{0}; - auto dst_port = tcpLayer->getDstPort(); - auto src_port = tcpLayer->getSrcPort(); - // note we want to capture metrics only when one of the ports is dns, - // but metrics on the port which is _not_ the dns port - if (DnsLayer::isDnsPort(dst_port)) { - metric_port = src_port; - } else if (DnsLayer::isDnsPort(src_port)) { - metric_port = dst_port; - } - if (metric_port) { - if (flowkey != _cached_dns_layer.flowKey || stamp.tv_sec != _cached_dns_layer.timestamp.tv_sec || stamp.tv_nsec != _cached_dns_layer.timestamp.tv_nsec) { - _cached_dns_layer.flowKey = flowkey; - _cached_dns_layer.timestamp = stamp; - _cached_dns_layer.dnsLayer = std::make_unique(tcpLayer, &payload); - } - auto dnsLayer = _cached_dns_layer.dnsLayer.get(); - if (!_filtering(*dnsLayer, dir, l3, pcpp::TCP, 
metric_port, stamp) && _configs(*dnsLayer)) { - _metrics->process_dns_layer(*dnsLayer, dir, l3, pcpp::TCP, flowkey, metric_port, _static_suffix_size, stamp); - _static_suffix_size = 0; - // signal for chained stream handlers, if we have any - if (_event_proxy) { - static_cast(_event_proxy.get())->tcp_reassembled_signal(payload, dir, l3, flowkey, stamp); - } - } - } -} - void DnsTcpSessionData::receive_tcp_data(const uint8_t *data, size_t len) { if (_invalid_data) { @@ -379,6 +298,7 @@ void DnsStreamHandler::tcp_message_ready_cb(int8_t side, const pcpp::TcpStreamDa // check if this flow already appears in the connection manager. If not add it auto iter = _tcp_connections.find(flowKey); + // if not tracking connection, and it's DNS, then start tracking. if (iter == _tcp_connections.end()) { // note we want to capture metrics only when one of the ports is dns, @@ -404,27 +324,14 @@ void DnsStreamHandler::tcp_message_ready_cb(int8_t side, const pcpp::TcpStreamDa // for tcp, endTime is updated by pcpp to represent the time stamp from the latest packet in the stream TIMEVAL_TO_TIMESPEC(&tcpData.getConnectionData().endTime, &stamp); - auto got_dns_message = [this, &conn = tcpData.getConnectionData(), port, dir, l3Type, flowKey, stamp](std::unique_ptr data, size_t size) { + auto got_dns_message = [this, port, dir, l3Type, flowKey, stamp](std::unique_ptr data, size_t size) { // this dummy packet prevents DnsLayer from owning and trying to free the data. 
it is otherwise unused by the DNS layer, // instead using the packet meta data we pass in pcpp::Packet dummy_packet; - auto dnsLayer = new DnsLayer(data.release(), size, nullptr, nullptr); - dummy_packet.addLayer(dnsLayer, true); - if (!_filtering(*dnsLayer, dir, l3Type, pcpp::TCP, port, stamp) && _configs(*dnsLayer)) { - _metrics->process_dns_layer(*dnsLayer, dir, l3Type, pcpp::TCP, flowKey, port, _static_suffix_size, stamp); + DnsLayer dnsLayer(data.get(), size, nullptr, &dummy_packet); + if (!_filtering(dnsLayer, dir, l3Type, pcpp::UDP, port, stamp) && _configs(dnsLayer)) { + _metrics->process_dns_layer(dnsLayer, dir, l3Type, pcpp::TCP, flowKey, port, _static_suffix_size, stamp); _static_suffix_size = 0; - // signal for chained stream handlers, if we have any - if (_event_proxy) { - dummy_packet.detachLayer(dnsLayer); - if (conn.srcIP.isIPv4()) { - dummy_packet.addLayer(new pcpp::IPv4Layer(conn.srcIP.getIPv4(), conn.dstIP.getIPv4()), true); - } else { - dummy_packet.addLayer(new pcpp::IPv6Layer(conn.srcIP.getIPv6(), conn.dstIP.getIPv6()), true); - } - dummy_packet.addLayer(new pcpp::TcpLayer(conn.srcPort, conn.dstPort), true); - dummy_packet.addLayer(dnsLayer, true); - static_cast(_event_proxy.get())->tcp_reassembled_signal(dummy_packet, dir, l3Type, flowKey, stamp); - } } // data is freed upon return }; @@ -485,9 +392,8 @@ void DnsStreamHandler::info_json(json &j) const inline void DnsStreamHandler::_register_predicate_filter(Filters filter, std::string f_key, std::string f_value) { - PcapInputEventProxy::UdpPredicate predicate; - if (filter == Filters::OnlyRCode) { - // all DnsStreamHandler race to install this predicate, which is only installed once per thread and called once per udp event + if (!_using_predicate_signals && filter == Filters::OnlyRCode) { + // all DnsStreamHandler race to install this predicate, which is only installed once and called once per udp event // it's job is to return the predicate "jump key" to call matching signals static 
thread_local auto udp_rcode_predicate = [&cache = _cached_dns_layer](pcpp::Packet &payload, PacketDirection, pcpp::ProtocolType, uint32_t flowkey, timespec stamp) -> std::string { pcpp::UdpLayer *udpLayer = payload.getLayerOfType(); @@ -504,51 +410,23 @@ inline void DnsStreamHandler::_register_predicate_filter(Filters filter, std::st } return std::string(DNS_SCHEMA) + "only_rcode" + std::to_string(dnsLayer->getDnsHeader()->responseCode); }; - predicate = udp_rcode_predicate; - } else if (filter == Filters::OnlyQName) { - static thread_local auto udp_qname_predicate = [&cache = _cached_dns_layer](pcpp::Packet &payload, PacketDirection, pcpp::ProtocolType, uint32_t flowkey, timespec stamp) -> std::string { - pcpp::UdpLayer *udpLayer = payload.getLayerOfType(); - assert(udpLayer); - if (flowkey != cache.flowKey || stamp.tv_sec != cache.timestamp.tv_sec || stamp.tv_nsec != cache.timestamp.tv_nsec) { - cache.flowKey = flowkey; - cache.timestamp = stamp; - cache.dnsLayer = std::make_unique(udpLayer, &payload); - } - auto dnsLayer = cache.dnsLayer.get(); - // return the 'jump key' for pcap to make O(1) call to appropriate signals - if (!dnsLayer->parseResources(true) || dnsLayer->getFirstQuery() == nullptr) { - return std::string(DNS_SCHEMA) + "only_qname"; // invalid qname - } - return std::string(DNS_SCHEMA) + "only_qname" + dnsLayer->getFirstQuery()->getNameLower(); + + // if the jump key matches, this callback fires + auto rcode_signal = [this](pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, uint32_t flowkey, timespec stamp) { + process_udp_packet_cb(payload, dir, l3, flowkey, stamp); }; - predicate = udp_qname_predicate; - } - // if the jump key matches, this callback fires - auto signal = [filter, this](pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, uint32_t flowkey, timespec stamp) { - _predicate_filter_type = filter; - process_udp_packet_cb(payload, dir, l3, flowkey, stamp); - _predicate_filter_type = Filters::FiltersMAX; - 
}; - if (_pcap_proxy) { - // even though predicate and callback are sent, pcap will only install the first one it sees from dns handler - // module name is sent to allow disconnect at shutdown time - _pcap_proxy->register_udp_predicate_signal(schema_key(), name(), f_key, f_value, predicate, signal); - _using_predicate_signals = true; + if (_pcap_proxy) { + // even though predicate and callback are sent, pcap will only install the first one it sees from dns handler + // module name is sent to allow disconnect at shutdown time + _pcap_proxy->register_udp_predicate_signal(schema_key(), name(), f_key, f_value, udp_rcode_predicate, rcode_signal); + _using_predicate_signals = true; + } } } inline bool DnsStreamHandler::_filtering(DnsLayer &payload, [[maybe_unused]] PacketDirection dir, [[maybe_unused]] pcpp::ProtocolType l3, [[maybe_unused]] pcpp::ProtocolType l4, [[maybe_unused]] uint16_t port, timespec stamp) { - if (_f_enabled[Filters::ExcludingRCode]) { - auto rcode = payload.getDnsHeader()->responseCode; - if (std::any_of(_f_rcodes.begin(), _f_rcodes.end(), [rcode](uint16_t f_rcode) { return rcode == f_rcode; })) { - goto will_filter; - } - } - if (_f_enabled[Filters::OnlyRCode] && _predicate_filter_type != Filters::OnlyRCode) { - auto rcode = payload.getDnsHeader()->responseCode; - if (std::none_of(_f_rcodes.begin(), _f_rcodes.end(), [rcode](uint16_t f_rcode) { return rcode == f_rcode; })) { - goto will_filter; - } + if (_f_enabled[Filters::ExcludingRCode] && payload.getDnsHeader()->responseCode == _f_rcode) { + goto will_filter; } if (_f_enabled[Filters::AnswerCount] && payload.getAnswerCount() != _f_answer_count) { goto will_filter; @@ -591,22 +469,12 @@ inline bool DnsStreamHandler::_filtering(DnsLayer &payload, [[maybe_unused]] Pac goto will_filter; } } - if (_f_enabled[Filters::OnlyQName] && _predicate_filter_type != Filters::OnlyQName) { - if (!payload.parseResources(true) || payload.getFirstQuery() == nullptr) { - goto will_filter; - } - std::string_view 
qname_ci = payload.getFirstQuery()->getNameLower(); - if (std::none_of(_f_qnames.begin(), _f_qnames.end(), [&qname_ci](std::string fqn) { return qname_ci == fqn; })) { - // checked the whole list and none of them matched: filter - goto will_filter; - } - } if (_f_enabled[Filters::OnlyQNameSuffix]) { if (!payload.parseResources(true) || payload.getFirstQuery() == nullptr) { goto will_filter; } std::string_view qname_ci = payload.getFirstQuery()->getNameLower(); - if (std::none_of(_f_qnames_suffix.begin(), _f_qnames_suffix.end(), [this, &qname_ci](std::string fqn) { + if (std::none_of(_f_qnames.begin(), _f_qnames.end(), [this, qname_ci](std::string fqn) { if (ends_with(qname_ci, fqn)) { _static_suffix_size = fqn.size(); return true; @@ -689,15 +557,9 @@ void DnsMetricsBucket::specialized_merge(const AbstractMetricsBucket &o, Metric: _counters.xacts_out += other._counters.xacts_out; _counters.xacts_timed_out += other._counters.xacts_timed_out; - if (group_enabled(group::DnsMetrics::Quantiles)) { - _dnsXactFromTimeUs.merge(other._dnsXactFromTimeUs, agg_operator); - _dnsXactToTimeUs.merge(other._dnsXactToTimeUs, agg_operator); - _dnsXactRatio.merge(other._dnsXactRatio, agg_operator); - } - if (group_enabled(group::DnsMetrics::Histograms)) { - _dnsXactFromHistTimeUs.merge(other._dnsXactFromHistTimeUs); - _dnsXactToHistTimeUs.merge(other._dnsXactToHistTimeUs); - } + _dnsXactFromTimeUs.merge(other._dnsXactFromTimeUs, agg_operator); + _dnsXactToTimeUs.merge(other._dnsXactToTimeUs, agg_operator); + _dnsXactRatio.merge(other._dnsXactRatio, agg_operator); _dns_slowXactIn.merge(other._dns_slowXactIn); _dns_slowXactOut.merge(other._dns_slowXactOut); } @@ -776,16 +638,9 @@ void DnsMetricsBucket::to_json(json &j) const _counters.xacts_in.to_json(j); _dns_slowXactIn.to_json(j); - if (group_enabled(group::DnsMetrics::Quantiles)) { - _dnsXactFromTimeUs.to_json(j); - _dnsXactToTimeUs.to_json(j); - _dnsXactRatio.to_json(j); - } - - if (group_enabled(group::DnsMetrics::Histograms)) { - 
_dnsXactFromHistTimeUs.to_json(j); - _dnsXactToHistTimeUs.to_json(j); - } + _dnsXactFromTimeUs.to_json(j); + _dnsXactToTimeUs.to_json(j); + _dnsXactRatio.to_json(j); _counters.xacts_out.to_json(j); _dns_slowXactOut.to_json(j); @@ -1103,20 +958,14 @@ void DnsMetricsBucket::new_dns_transaction(bool deep, float to90th, float from90 if (dir == PacketDirection::toHost) { ++_counters.xacts_out; - if (deep && group_enabled(group::DnsMetrics::Quantiles)) { + if (deep) { _dnsXactFromTimeUs.update(xactTime); } - if (deep && group_enabled(group::DnsMetrics::Histograms)) { - _dnsXactFromHistTimeUs.update(xactTime); - } } else if (dir == PacketDirection::fromHost) { ++_counters.xacts_in; - if (deep && group_enabled(group::DnsMetrics::Quantiles)) { + if (deep) { _dnsXactToTimeUs.update(xactTime); } - if (deep && group_enabled(group::DnsMetrics::Histograms)) { - _dnsXactToHistTimeUs.update(xactTime); - } } if (deep) { @@ -1177,16 +1026,9 @@ void DnsMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap ad _counters.xacts_in.to_prometheus(out, add_labels); _dns_slowXactIn.to_prometheus(out, add_labels); - if (group_enabled(group::DnsMetrics::Quantiles)) { - _dnsXactFromTimeUs.to_prometheus(out, add_labels); - _dnsXactToTimeUs.to_prometheus(out, add_labels); - _dnsXactRatio.to_prometheus(out, add_labels); - } - - if (group_enabled(group::DnsMetrics::Histograms)) { - _dnsXactFromHistTimeUs.to_prometheus(out, add_labels); - _dnsXactToHistTimeUs.to_prometheus(out, add_labels); - } + _dnsXactFromTimeUs.to_prometheus(out, add_labels); + _dnsXactToTimeUs.to_prometheus(out, add_labels); + _dnsXactRatio.to_prometheus(out, add_labels); _counters.xacts_out.to_prometheus(out, add_labels); _dns_slowXactOut.to_prometheus(out, add_labels); @@ -1237,108 +1079,6 @@ void DnsMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap ad } }); } - -void DnsMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap 
add_labels) const -{ - _rate_total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - - { - auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - - event_rate->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_events->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_samples->to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - std::shared_lock r_lock(_mutex); - if (group_enabled(group::DnsMetrics::Counters)) { - _counters.queries.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.replies.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.TCP.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.UDP.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.IPv4.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.IPv6.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.NX.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.REFUSED.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.SRVFAIL.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.RNOERROR.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.NODATA.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.filtered.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - if (group_enabled(group::DnsMetrics::Cardinality)) { - _dns_qnameCard.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - if (group_enabled(group::DnsMetrics::DnsTransactions)) { - _counters.xacts_total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.xacts_timed_out.to_opentelemetry(scope, start_ts, end_ts, add_labels); - - _counters.xacts_in.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dns_slowXactIn.to_opentelemetry(scope, start_ts, end_ts, add_labels); - - if 
(group_enabled(group::DnsMetrics::Quantiles)) { - _dnsXactFromTimeUs.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dnsXactToTimeUs.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dnsXactRatio.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - if (group_enabled(group::DnsMetrics::Histograms)) { - _dnsXactFromHistTimeUs.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dnsXactToHistTimeUs.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - _counters.xacts_out.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dns_slowXactOut.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - if (group_enabled(group::DnsMetrics::TopPorts)) { - _dns_topUDPPort.to_opentelemetry(scope, start_ts, end_ts, add_labels, [](const uint16_t &val) { return std::to_string(val); }); - } - if (group_enabled(group::DnsMetrics::TopEcs)) { - group_enabled(group::DnsMetrics::Counters) ? _counters.queryECS.to_opentelemetry(scope, start_ts, end_ts, add_labels) : void(); - _dns_topGeoLocECS.to_opentelemetry(scope, start_ts, end_ts, add_labels, [](Metric::LabelMap &l, const std::string &key, const visor::geo::City &val) { - l[key] = val.location; - if (!val.latitude.empty() && !val.longitude.empty()) { - l["lat"] = val.latitude; - l["lon"] = val.longitude; - } - }); - _dns_topASNECS.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dns_topQueryECS.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - if (group_enabled(group::DnsMetrics::TopQnames)) { - _dns_topQname2.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dns_topQname3.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dns_topNX.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dns_topREFUSED.to_opentelemetry(scope, start_ts, end_ts, add_labels); - - _dns_topSRVFAIL.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dns_topNODATA.to_opentelemetry(scope, start_ts, end_ts, add_labels); - if 
(group_enabled(group::DnsMetrics::TopQnamesDetails)) { - _dns_topSizedQnameResp.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dns_topNOERROR.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - } - _dns_topRCode.to_opentelemetry(scope, start_ts, end_ts, add_labels, [](const uint16_t &val) { - if (RCodeNames.find(val) != RCodeNames.end()) { - return RCodeNames[val]; - } else { - return std::to_string(val); - } - }); - - _dns_topQType.to_opentelemetry(scope, start_ts, end_ts, add_labels, [](const uint16_t &val) { - if (QTypeNames.find(val) != QTypeNames.end()) { - return QTypeNames[val]; - } else { - return std::to_string(val); - } - }); -} - void DnsMetricsBucket::process_filtered() { std::unique_lock lock(_mutex); @@ -1358,14 +1098,14 @@ void DnsMetricsManager::process_dns_layer(DnsLayer &payload, PacketDirection dir if (group_enabled(group::DnsMetrics::DnsTransactions)) { // handle dns transactions (query/response pairs) if (payload.getDnsHeader()->queryOrResponse == QR::response) { - auto xact = _qr_pair_manager->maybe_end_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), stamp); + auto xact = _qr_pair_manager.maybe_end_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), stamp); if (xact.first == Result::Valid) { live_bucket()->new_dns_transaction(_deep_sampling_now, _to90th, _from90th, payload, dir, xact.second); } else if (xact.first == Result::TimedOut) { live_bucket()->inc_xact_timed_out(1); } } else { - _qr_pair_manager->start_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), {{stamp, {0, 0}}, payload.getDataLen()}); + _qr_pair_manager.start_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), {{stamp, {0, 0}}, payload.getDataLen()}); } } } diff --git a/src/handlers/dns/v1/DnsStreamHandler.h b/src/handlers/dns/v1/DnsStreamHandler.h index 2d10cc64a..113c7db31 100644 --- a/src/handlers/dns/v1/DnsStreamHandler.h +++ b/src/handlers/dns/v1/DnsStreamHandler.h @@ 
-36,8 +36,6 @@ namespace group { enum DnsMetrics : visor::MetricGroupIntType { Cardinality, Counters, - Quantiles, - Histograms, DnsTransactions, TopEcs, TopQnames, @@ -67,8 +65,6 @@ class DnsMetricsBucket final : public visor::AbstractMetricsBucket Quantile _dnsXactFromTimeUs; Quantile _dnsXactToTimeUs; - Histogram _dnsXactFromHistTimeUs; - Histogram _dnsXactToHistTimeUs; Quantile _dnsXactRatio; Cardinality _dns_qnameCard; @@ -125,11 +121,11 @@ class DnsMetricsBucket final : public visor::AbstractMetricsBucket , DOH(DNS_SCHEMA, {"wire_packets", "doh"}, "Total DNS wire packets received over DNS over HTTPS") , IPv4(DNS_SCHEMA, {"wire_packets", "ipv4"}, "Total DNS wire packets received over IPv4 (ingress and egress)") , IPv6(DNS_SCHEMA, {"wire_packets", "ipv6"}, "Total DNS wire packets received over IPv6 (ingress and egress)") - , NX(DNS_SCHEMA, {"wire_packets", "nxdomain"}, "Total DNS wire packets flagged as reply with response code NXDOMAIN (ingress and egress)") - , REFUSED(DNS_SCHEMA, {"wire_packets", "refused"}, "Total DNS wire packets flagged as reply with response code REFUSED (ingress and egress)") - , SRVFAIL(DNS_SCHEMA, {"wire_packets", "srvfail"}, "Total DNS wire packets flagged as reply with response code SRVFAIL (ingress and egress)") - , RNOERROR(DNS_SCHEMA, {"wire_packets", "noerror"}, "Total DNS wire packets flagged as reply with response code NOERROR (ingress and egress)") - , NODATA(DNS_SCHEMA, {"wire_packets", "nodata"}, "Total DNS wire packets flagged as reply with response code NOERROR and no answer section data (ingress and egress)") + , NX(DNS_SCHEMA, {"wire_packets", "nxdomain"}, "Total DNS wire packets flagged as reply with return code NXDOMAIN (ingress and egress)") + , REFUSED(DNS_SCHEMA, {"wire_packets", "refused"}, "Total DNS wire packets flagged as reply with return code REFUSED (ingress and egress)") + , SRVFAIL(DNS_SCHEMA, {"wire_packets", "srvfail"}, "Total DNS wire packets flagged as reply with return code SRVFAIL (ingress and 
egress)") + , RNOERROR(DNS_SCHEMA, {"wire_packets", "noerror"}, "Total DNS wire packets flagged as reply with return code NOERROR (ingress and egress)") + , NODATA(DNS_SCHEMA, {"wire_packets", "nodata"}, "Total DNS wire packets flagged as reply with return code NOERROR and no answer section data (ingress and egress)") , total(DNS_SCHEMA, {"wire_packets", "total"}, "Total DNS wire packets matching the configured filter(s)") , filtered(DNS_SCHEMA, {"wire_packets", "filtered"}, "Total DNS wire packets seen that did not match the configured filter(s) (if any)") , queryECS(DNS_SCHEMA, {"wire_packets", "query_ecs"}, "Total queries that have EDNS Client Subnet (ECS) field set") @@ -144,8 +140,6 @@ class DnsMetricsBucket final : public visor::AbstractMetricsBucket DnsMetricsBucket() : _dnsXactFromTimeUs(DNS_SCHEMA, {"xact", "out", "quantiles_us"}, "Quantiles of transaction timing (query/reply pairs) when host is client, in microseconds") , _dnsXactToTimeUs(DNS_SCHEMA, {"xact", "in", "quantiles_us"}, "Quantiles of transaction timing (query/reply pairs) when host is server, in microseconds") - , _dnsXactFromHistTimeUs(DNS_SCHEMA, {"xact", "out", "histogram_us"}, "Histogram of transaction timing (query/reply pairs) when host is client, in microseconds") - , _dnsXactToHistTimeUs(DNS_SCHEMA, {"xact", "in", "histogram_us"}, "Histogram of transaction timing (query/reply pairs) when host is server, in microseconds") , _dnsXactRatio(DNS_SCHEMA, {"xact", "ratio", "quantiles"}, "Quantiles of ratio of packet sizes in a DNS transaction (reply/query)") , _dns_qnameCard(DNS_SCHEMA, {"cardinality", "qname"}, "Cardinality of unique QNAMES, both ingress and egress") , _dns_topGeoLocECS(DNS_SCHEMA, "geo_loc", {"top_geoLoc_ecs"}, "Top GeoIP ECS locations") @@ -199,7 +193,6 @@ class DnsMetricsBucket final : public visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; void to_json(json &j) const override; void 
to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const override; void update_topn_metrics(size_t topn_count, uint64_t percentile_threshold) override { _dns_topGeoLocECS.set_settings(topn_count, percentile_threshold); @@ -237,22 +230,20 @@ class DnsMetricsBucket final : public visor::AbstractMetricsBucket class DnsMetricsManager final : public visor::AbstractMetricsManager { using DnsXactID = std::pair; - typedef lib::transaction::TransactionManager DnsTransactionManager; - std::unique_ptr _qr_pair_manager; + visor::lib::transaction::TransactionManager _qr_pair_manager; float _to90th{0.0}; float _from90th{0.0}; public: DnsMetricsManager(const Configurable *window_config) : visor::AbstractMetricsManager(window_config) - , _qr_pair_manager(std::make_unique()) { } void on_period_shift(timespec stamp, [[maybe_unused]] const DnsMetricsBucket *maybe_expiring_bucket) override { // DNS transaction support - auto timed_out = _qr_pair_manager->purge_old_transactions(stamp); + auto timed_out = _qr_pair_manager.purge_old_transactions(stamp); if (timed_out) { live_bucket()->inc_xact_timed_out(timed_out); } @@ -268,12 +259,7 @@ class DnsMetricsManager final : public visor::AbstractMetricsManageropen_transaction_count(); - } - - void set_xact_ttl(uint32_t ttl) - { - _qr_pair_manager = std::make_unique(ttl); + return _qr_pair_manager.open_transaction_count(); } void process_filtered(timespec stamp); @@ -318,7 +304,6 @@ class DnsStreamHandler final : public visor::StreamMetricsHandler _c_enabled; - std::vector _f_rcodes; + uint16_t _f_rcode{0}; uint64_t _f_answer_count{0}; - std::vector _f_qnames_suffix; std::vector _f_qnames; std::vector _f_qtypes; size_t _static_suffix_size{0}; std::bitset _f_dnstap_types; bool _using_predicate_signals{false}; - Filters _predicate_filter_type{Filters::FiltersMAX}; static const 
inline StreamMetricsHandler::ConfigsDefType _config_defs = { "exclude_noerror", @@ -386,21 +367,16 @@ class DnsStreamHandler final : public visor::StreamMetricsHandler #include #include #include +#include #include #ifdef __GNUC__ #pragma GCC diagnostic pop @@ -331,7 +332,7 @@ TEST_CASE("DNS Filters: only_rcode nx", "[pcap][net]") REQUIRE(j["wire_packets"]["filtered"] == 0); } -TEST_CASE("DNS Filters: only_rcode nx and refused", "[pcap][dns]") +TEST_CASE("DNS Filters: only_rcode refused", "[pcap][dns]") { PcapInputStream stream{"pcap-test"}; @@ -345,7 +346,7 @@ TEST_CASE("DNS Filters: only_rcode nx and refused", "[pcap][dns]") c.config_set("num_periods", 1); DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - dns_handler.config_set("only_rcode", {"nxdomain", "5"}); + dns_handler.config_set("only_rcode", Refused); dns_handler.start(); stream.start(); @@ -356,7 +357,7 @@ TEST_CASE("DNS Filters: only_rcode nx and refused", "[pcap][dns]") REQUIRE(counters.RNOERROR.value() == 0); REQUIRE(counters.SRVFAIL.value() == 0); REQUIRE(counters.REFUSED.value() == 1); - REQUIRE(counters.NX.value() == 1); + REQUIRE(counters.NX.value() == 0); REQUIRE(counters.NODATA.value() == 0); nlohmann::json j; dns_handler.metrics()->bucket(0)->to_json(j); @@ -376,7 +377,7 @@ TEST_CASE("DNS Filters: only_qtypes AAAA and TXT", "[pcap][dns]") c.config_set("num_periods", 1); DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - // notice case-insensitive + // notice case insensitive dns_handler.config_set("only_qtype", {"AAAA", "TxT"}); dns_handler.start(); stream.start(); @@ -441,45 +442,6 @@ TEST_CASE("DNS TopN custom size", "[pcap][dns]") CHECK(j["top_qtype"][3] == nullptr); } -TEST_CASE("DNS Filters: only_qname", "[pcap][dns]") -{ - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_mixed_rcode.pcap"); - stream.config_set("bpf", ""); - stream.config_set("host_spec", "192.168.0.0/24"); - stream.parse_host_spec(); - - visor::Config c; 
- auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - - // notice, case-insensitive - dns_handler.config_set("only_qname", {"play.GooGle.com", "nonexistent.google.com"}); - dns_handler.start(); - stream.start(); - stream.stop(); - dns_handler.stop(); - - auto counters = dns_handler.metrics()->bucket(0)->counters(); - - CHECK(counters.UDP.value() == 6); - CHECK(counters.RNOERROR.value() == 2); - CHECK(counters.SRVFAIL.value() == 0); - CHECK(counters.REFUSED.value() == 0); - CHECK(counters.NX.value() == 1); - CHECK(counters.NODATA.value() == 2); - CHECK(counters.total.value() == 6); - CHECK(counters.filtered.value() == 0); - - nlohmann::json j; - dns_handler.metrics()->bucket(0)->to_json(j); - - CHECK(j["top_qname2"][0]["name"] == ".google.com"); - CHECK(j["top_qname3"][0]["name"] == "play.google.com"); -} - TEST_CASE("DNS Filters: only_qname_suffix", "[pcap][dns]") { @@ -494,7 +456,7 @@ TEST_CASE("DNS Filters: only_qname_suffix", "[pcap][dns]") c.config_set("num_periods", 1); DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - // notice, case-insensitive + // notice, case insensitive dns_handler.config_set("only_qname_suffix", {"GooGle.com"}); dns_handler.start(); stream.start(); @@ -867,7 +829,7 @@ TEST_CASE("DNS filter exceptions", "[pcap][dns][filter]") SECTION("only_rcode as string") { dns_handler.config_set("only_rcode", "1"); - REQUIRE_THROWS_WITH(dns_handler.start(), "DnsStreamHandler: wrong value type for only_rcode filter. It should be an integer or an array"); + REQUIRE_THROWS_WITH(dns_handler.start(), "DnsStreamHandler: wrong value type for only_rcode filter. 
It should be an integer"); } SECTION("only_rcode invalid") @@ -908,10 +870,9 @@ TEST_CASE("DNS groups", "[pcap][dns]") c.config_set("num_periods", 1); DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - SECTION("disable cardinality and counters and enable histograms") + SECTION("disable cardinality and counters") { dns_handler.config_set("disable", {"cardinality", "counters"}); - dns_handler.config_set("enable", {"histograms"}); dns_handler.start(); stream.start(); @@ -940,7 +901,6 @@ TEST_CASE("DNS groups", "[pcap][dns]") CHECK(j["cardinality"]["qname"] == nullptr); CHECK(j["top_qname2"][0]["name"] == ".test.com"); CHECK(j["xact"]["ratio"]["quantiles"]["p50"] != nullptr); - CHECK(j["xact"]["out"]["histogram_us"]["buckets"] != nullptr); } SECTION("disable TopQname and Dns Transactions") @@ -978,13 +938,13 @@ TEST_CASE("DNS groups", "[pcap][dns]") SECTION("disable invalid dns group") { dns_handler.config_set("disable", {"top_qnames", "dns_top_wired"}); - REQUIRE_THROWS_WITH(dns_handler.start(), "dns_top_wired is an invalid/unsupported metric group. The valid groups are: all, cardinality, counters, dns_transaction, histograms, quantiles, top_ecs, top_ports, top_qnames, top_qnames_details"); + REQUIRE_THROWS_WITH(dns_handler.start(), "dns_top_wired is an invalid/unsupported metric group. The valid groups are: all, cardinality, counters, dns_transaction, top_ecs, top_ports, top_qnames, top_qnames_details"); } SECTION("enable invalid dns group") { dns_handler.config_set("enable", {"top_qnames", "dns_top_wired"}); - REQUIRE_THROWS_WITH(dns_handler.start(), "dns_top_wired is an invalid/unsupported metric group. The valid groups are: all, cardinality, counters, dns_transaction, histograms, quantiles, top_ecs, top_ports, top_qnames, top_qnames_details"); + REQUIRE_THROWS_WITH(dns_handler.start(), "dns_top_wired is an invalid/unsupported metric group. 
The valid groups are: all, cardinality, counters, dns_transaction, top_ecs, top_ports, top_qnames, top_qnames_details"); } } @@ -998,20 +958,7 @@ TEST_CASE("DNS invalid config", "[dns][filter][config]") c.config_set("num_periods", 1); DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; dns_handler.config_set("invalid_config", true); - REQUIRE_THROWS_WITH(dns_handler.start(), "invalid_config is an invalid/unsupported config or filter. The valid configs/filters are: exclude_noerror, only_rcode, only_queries, only_responses, only_dnssec_response, answer_count, only_qtype, only_qname, only_qname_suffix, geoloc_notfound, asn_notfound, dnstap_msg_type, public_suffix_list, recorded_stream, xact_ttl_secs, xact_ttl_ms, deep_sample_rate, num_periods, topn_count, topn_percentile_threshold"); -} - -TEST_CASE("DNS config ttl", "[dns][config]") -{ - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_tcp_random.pcap"); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - c.config_set("xact_ttl_secs", 2); - DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - REQUIRE_NOTHROW(dns_handler.start()); + REQUIRE_THROWS_WITH(dns_handler.start(), "invalid_config is an invalid/unsupported config or filter. 
The valid configs/filters are: exclude_noerror, only_rcode, only_queries, only_responses, only_dnssec_response, answer_count, only_qtype, only_qname_suffix, geoloc_notfound, asn_notfound, dnstap_msg_type, public_suffix_list, recorded_stream, deep_sample_rate, num_periods, topn_count, topn_percentile_threshold"); } TEST_CASE("DNS Filters: only_rcode with predicate", "[pcap][dns][filter]") diff --git a/src/handlers/dns/v2/DnsStreamHandler.cpp b/src/handlers/dns/v2/DnsStreamHandler.cpp index a2454e8df..ca57c9a9f 100644 --- a/src/handlers/dns/v2/DnsStreamHandler.cpp +++ b/src/handlers/dns/v2/DnsStreamHandler.cpp @@ -16,7 +16,6 @@ #pragma clang diagnostic ignored "-Wc99-extensions" #endif #include -#include #include #ifdef __GNUC__ #pragma GCC diagnostic pop @@ -61,45 +60,19 @@ void DnsStreamHandler::start() // Setup Filters if (config_exists("exclude_noerror") && config_get("exclude_noerror")) { _f_enabled.set(Filters::ExcludingRCode); - _f_rcodes.push_back(NoError); + _f_rcode = NoError; } else if (config_exists("only_rcode")) { - std::vector rcodes; uint64_t want_code; try { want_code = config_get("only_rcode"); } catch (const std::exception &e) { - try { - rcodes = config_get("only_rcode"); - } catch (const std::exception &e) { - throw ConfigException("DnsStreamHandler: wrong value type for only_rcode filter. It should be an integer or an array"); - } + throw ConfigException("DnsStreamHandler: wrong value type for only_rcode filter. 
It should be an integer"); } - _f_enabled.set(Filters::OnlyRCode); - if (rcodes.empty()) { - if (RCodeNames.find(want_code) != RCodeNames.end()) { - _f_rcodes.push_back(static_cast(want_code)); - } else { - throw ConfigException("DnsStreamHandler: only_rcode filter contained an invalid/unsupported rcode"); - } + if (RCodeNames.find(want_code) != RCodeNames.end()) { + _f_enabled.set(Filters::OnlyRCode); + _f_rcode = want_code; } else { - for (const auto &rcode : rcodes) { - if (std::all_of(rcode.begin(), rcode.end(), ::isdigit)) { - auto value = std::stoul(rcode); - if (RCodeNames.find(value) == RCodeNames.end()) { - throw ConfigException(fmt::format("DnsStreamHandler: only_rcode filter contained an invalid/unsupported rcode: {}", value)); - } - _f_rcodes.push_back(value); - } else { - std::string upper_rcode{rcode}; - std::transform(upper_rcode.begin(), upper_rcode.end(), upper_rcode.begin(), - [](unsigned char c) { return std::toupper(c); }); - if (RCodeNumbers.find(upper_rcode) != RCodeNumbers.end()) { - _f_rcodes.push_back(RCodeNumbers[upper_rcode]); - } else { - throw ConfigException(fmt::format("DnsStreamHandler: only_rcode filter contained an invalid/unsupported rcode: {}", rcode)); - } - } - } + throw ConfigException("DnsStreamHandler: only_rcode filter contained an invalid/unsupported rcode"); } } if (config_exists("only_dnssec_response") && config_get("only_dnssec_response")) { @@ -150,22 +123,16 @@ void DnsStreamHandler::start() } } } - if (config_exists("only_qname")) { - _f_enabled.set(Filters::OnlyQName); - for (const auto &qname : config_get("only_qname")) { - std::string qname_ci{qname}; - std::transform(qname_ci.begin(), qname_ci.end(), qname_ci.begin(), - [](unsigned char c) { return std::tolower(c); }); - _f_qnames.emplace_back(std::move(qname_ci)); - } - } if (config_exists("only_qname_suffix")) { _f_enabled.set(Filters::OnlyQNameSuffix); for (const auto &qname : config_get("only_qname_suffix")) { + // note, this currently copies the strings, 
meaning there could be a big list that is duplicated + // we can work on trying to make this a string_view instead + // we copy it out so that we don't have to hit the config mutex std::string qname_ci{qname}; std::transform(qname_ci.begin(), qname_ci.end(), qname_ci.begin(), [](unsigned char c) { return std::tolower(c); }); - _f_qnames_suffix.emplace_back(std::move(qname_ci)); + _f_qnames.emplace_back(std::move(qname_ci)); } } if (config_exists("geoloc_notfound") && config_get("geoloc_notfound")) { @@ -198,17 +165,8 @@ void DnsStreamHandler::start() _metrics->set_recorded_stream(); } - if (config_exists("xact_ttl_ms")) { - auto ttl = config_get("xact_ttl_ms"); - _metrics->set_xact_ttl(static_cast(ttl)); - } else if (config_exists("xact_ttl_secs")) { - auto ttl = config_get("xact_ttl_secs"); - _metrics->set_xact_ttl(static_cast(ttl) * 1000); - } - if (_pcap_proxy) { _pkt_udp_connection = _pcap_proxy->udp_signal.connect(&DnsStreamHandler::process_udp_packet_cb, this); - _pkt_tcp_reassembled_connection = _pcap_proxy->tcp_reassembled_signal.connect(&DnsStreamHandler::process_tcp_reassembled_packet_cb, this); _start_tstamp_connection = _pcap_proxy->start_tstamp_signal.connect([this](timespec stamp) { set_start_tstamp(stamp); _event_proxy ? 
static_cast(_event_proxy.get())->start_tstamp_signal(stamp) : void(); @@ -243,7 +201,6 @@ void DnsStreamHandler::stop() if (_pcap_proxy) { _pkt_udp_connection.disconnect(); - _pkt_tcp_reassembled_connection.disconnect(); _start_tstamp_connection.disconnect(); _end_tstamp_connection.disconnect(); _tcp_start_connection.disconnect(); @@ -274,8 +231,8 @@ void DnsStreamHandler::process_udp_packet_cb(pcpp::Packet &payload, PacketDirect assert(udpLayer); uint16_t metric_port{0}; - auto dst_port = udpLayer->getDstPort(); - auto src_port = udpLayer->getSrcPort(); + auto dst_port = ntohs(udpLayer->getUdpHeader()->portDst); + auto src_port = ntohs(udpLayer->getUdpHeader()->portSrc); // note we want to capture metrics only when one of the ports is dns, // but metrics on the port which is _not_ the dns port if (DnsLayer::isDnsPort(dst_port)) { @@ -302,39 +259,6 @@ void DnsStreamHandler::process_udp_packet_cb(pcpp::Packet &payload, PacketDirect } } -void DnsStreamHandler::process_tcp_reassembled_packet_cb(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, uint32_t flowkey, timespec stamp) -{ - pcpp::TcpLayer *tcpLayer = payload.getLayerOfType(); - assert(tcpLayer); - - uint16_t metric_port{0}; - auto dst_port = tcpLayer->getDstPort(); - auto src_port = tcpLayer->getSrcPort(); - // note we want to capture metrics only when one of the ports is dns, - // but metrics on the port which is _not_ the dns port - if (DnsLayer::isDnsPort(dst_port)) { - metric_port = src_port; - } else if (DnsLayer::isDnsPort(src_port)) { - metric_port = dst_port; - } - if (metric_port) { - if (flowkey != _cached_dns_layer.flowKey || stamp.tv_sec != _cached_dns_layer.timestamp.tv_sec || stamp.tv_nsec != _cached_dns_layer.timestamp.tv_nsec) { - _cached_dns_layer.flowKey = flowkey; - _cached_dns_layer.timestamp = stamp; - _cached_dns_layer.dnsLayer = std::make_unique(tcpLayer, &payload); - } - auto dnsLayer = _cached_dns_layer.dnsLayer.get(); - if (!_filtering(*dnsLayer, dir, flowkey, stamp) 
&& _configs(*dnsLayer)) { - _metrics->process_dns_layer(*dnsLayer, dir, l3, pcpp::TCP, flowkey, metric_port, _static_suffix_size, stamp); - _static_suffix_size = 0; - // signal for chained stream handlers, if we have any - if (_event_proxy) { - static_cast(_event_proxy.get())->tcp_reassembled_signal(payload, dir, l3, flowkey, stamp); - } - } - } -} - void DnsTcpSessionData::receive_tcp_data(const uint8_t *data, size_t len) { if (_invalid_data) { @@ -379,6 +303,7 @@ void DnsStreamHandler::tcp_message_ready_cb(int8_t side, const pcpp::TcpStreamDa // check if this flow already appears in the connection manager. If not add it auto iter = _tcp_connections.find(flowKey); + // if not tracking connection, and it's DNS, then start tracking. if (iter == _tcp_connections.end()) { // note we want to capture metrics only when one of the ports is dns, @@ -404,27 +329,14 @@ void DnsStreamHandler::tcp_message_ready_cb(int8_t side, const pcpp::TcpStreamDa // for tcp, endTime is updated by pcpp to represent the time stamp from the latest packet in the stream TIMEVAL_TO_TIMESPEC(&tcpData.getConnectionData().endTime, &stamp); - auto got_dns_message = [this, &conn = tcpData.getConnectionData(), port, dir, l3Type, flowKey, stamp](std::unique_ptr data, size_t size) { + auto got_dns_message = [this, port, dir, l3Type, flowKey, stamp](std::unique_ptr data, size_t size) { // this dummy packet prevents DnsLayer from owning and trying to free the data. 
it is otherwise unused by the DNS layer, // instead using the packet meta data we pass in pcpp::Packet dummy_packet; - auto dnsLayer = new DnsLayer(data.release(), size, nullptr, nullptr); - dummy_packet.addLayer(dnsLayer, true); - if (!_filtering(*dnsLayer, dir, l3Type, stamp) && _configs(*dnsLayer)) { - _metrics->process_dns_layer(*dnsLayer, dir, l3Type, pcpp::TCP, flowKey, port, _static_suffix_size, stamp); + DnsLayer dnsLayer(data.get(), size, nullptr, &dummy_packet); + if (!_filtering(dnsLayer, dir, flowKey, stamp) && _configs(dnsLayer)) { + _metrics->process_dns_layer(dnsLayer, dir, l3Type, pcpp::TCP, flowKey, port, _static_suffix_size, stamp); _static_suffix_size = 0; - // signal for chained stream handlers, if we have any - if (_event_proxy) { - dummy_packet.detachLayer(dnsLayer); - if (conn.srcIP.isIPv4()) { - dummy_packet.addLayer(new pcpp::IPv4Layer(conn.srcIP.getIPv4(), conn.dstIP.getIPv4()), true); - } else { - dummy_packet.addLayer(new pcpp::IPv6Layer(conn.srcIP.getIPv6(), conn.dstIP.getIPv6()), true); - } - dummy_packet.addLayer(new pcpp::TcpLayer(conn.srcPort, conn.dstPort), true); - dummy_packet.addLayer(dnsLayer, true); - static_cast(_event_proxy.get())->tcp_reassembled_signal(dummy_packet, dir, l3Type, flowKey, stamp); - } } // data is freed upon return }; @@ -495,17 +407,10 @@ inline bool DnsStreamHandler::_filtering(DnsLayer &payload, PacketDirection dir, if (_f_enabled[Filters::DisableOut] && dir == PacketDirection::toHost) { goto will_filter; } - if (_f_enabled[Filters::ExcludingRCode]) { - auto rcode = payload.getDnsHeader()->responseCode; - if (std::any_of(_f_rcodes.begin(), _f_rcodes.end(), [rcode](uint16_t f_rcode) { return rcode == f_rcode; })) { - goto will_filter; - } - } - if (_f_enabled[Filters::OnlyRCode]) { - auto rcode = payload.getDnsHeader()->responseCode; - if (std::none_of(_f_rcodes.begin(), _f_rcodes.end(), [rcode](uint16_t f_rcode) { return rcode == f_rcode; })) { - goto will_filter; - } + if 
(_f_enabled[Filters::ExcludingRCode] && payload.getDnsHeader()->responseCode == _f_rcode) { + goto will_filter; + } else if (_f_enabled[Filters::OnlyRCode] && payload.getDnsHeader()->responseCode != _f_rcode) { + goto will_filter; } if (_f_enabled[Filters::AnswerCount] && payload.getAnswerCount() != _f_answer_count) { goto will_filter; @@ -573,33 +478,22 @@ inline bool DnsStreamHandler::_filtering(DnsLayer &payload, PacketDirection dir, goto will_filter; } } + } - if (_f_enabled[Filters::OnlyQName]) { - if (!payload.parseResources(true) || payload.getFirstQuery() == nullptr) { - goto will_filter; - } - std::string_view qname_ci = payload.getFirstQuery()->getNameLower(); - if (std::none_of(_f_qnames.begin(), _f_qnames.end(), [&qname_ci](std::string fqn) { return qname_ci == fqn; })) { - // checked the whole list and none of them matched: filter - goto will_filter; - } + if (_f_enabled[Filters::OnlyQNameSuffix]) { + if (!payload.parseResources(true) || payload.getFirstQuery() == nullptr) { + goto will_filter; } - - if (_f_enabled[Filters::OnlyQNameSuffix]) { - if (!payload.parseResources(true) || payload.getFirstQuery() == nullptr) { - goto will_filter; - } - std::string_view qname_ci = payload.getFirstQuery()->getNameLower(); - if (std::none_of(_f_qnames_suffix.begin(), _f_qnames_suffix.end(), [this, &qname_ci](std::string fqn) { - if (ends_with(qname_ci, fqn)) { - _static_suffix_size = fqn.size(); - return true; - } - return false; - })) { - // checked the whole list and none of them matched: filter - goto will_filter; - } + std::string_view qname_ci = payload.getFirstQuery()->getNameLower(); + if (std::none_of(_f_qnames.begin(), _f_qnames.end(), [this, qname_ci](std::string fqn) { + if (ends_with(qname_ci, fqn)) { + _static_suffix_size = fqn.size(); + return true; + } + return false; + })) { + // checked the whole list and none of them matched: filter + goto will_filter; } } @@ -622,56 +516,51 @@ void DnsMetricsBucket::specialized_merge(const AbstractMetricsBucket 
&o, Metric: // static because caller guarantees only our own bucket type const auto &other = static_cast(o); - // generate transaction directions if they do not exist - for (auto &dns : other._dns) { - dir_setup(dns.first); - } - // rates maintain their own thread safety - for (auto &dns : other._dns) { - group_enabled(group::DnsMetrics::Quantiles) ? _dns.at(dns.first).dnsRate.merge(dns.second.dnsRate, agg_operator) : void(); + for (auto &dns : _dns) { + group_enabled(group::DnsMetrics::Quantiles) ? dns.second.dnsRate.merge(other._dns.at(dns.first).dnsRate, agg_operator) : void(); } std::shared_lock r_lock(other._mutex); std::unique_lock w_lock(_mutex); group_enabled(group::DnsMetrics::Counters) ? _filtered += other._filtered : void(); - for (auto &dns : other._dns) { - group_enabled(group::DnsMetrics::Counters) ? _dns.at(dns.first).counters += dns.second.counters : void(); - group_enabled(group::DnsMetrics::Cardinality) ? _dns.at(dns.first).qnameCard.merge(dns.second.qnameCard) : void(); + for (auto &dns : _dns) { + + group_enabled(group::DnsMetrics::Counters) ? dns.second.counters += other._dns.at(dns.first).counters : void(); + group_enabled(group::DnsMetrics::Cardinality) ? 
dns.second.qnameCard.merge(other._dns.at(dns.first).qnameCard) : void(); if (group_enabled(group::DnsMetrics::TopEcs)) { - _dns.at(dns.first).topGeoLocECS.merge(dns.second.topGeoLocECS); - _dns.at(dns.first).topASNECS.merge(dns.second.topASNECS); - _dns.at(dns.first).topQueryECS.merge(dns.second.topQueryECS); + dns.second.topGeoLocECS.merge(other._dns.at(dns.first).topGeoLocECS); + dns.second.topASNECS.merge(other._dns.at(dns.first).topASNECS); + dns.second.topQueryECS.merge(other._dns.at(dns.first).topQueryECS); } if (group_enabled(group::DnsMetrics::TopRcodes)) { - _dns.at(dns.first).topNX.merge(dns.second.topNX); - _dns.at(dns.first).topREFUSED.merge(dns.second.topREFUSED); - _dns.at(dns.first).topSRVFAIL.merge(dns.second.topSRVFAIL); - _dns.at(dns.first).topNODATA.merge(dns.second.topNODATA); - _dns.at(dns.first).topNOERROR.merge(dns.second.topNOERROR); - _dns.at(dns.first).topRCode.merge(dns.second.topRCode); + dns.second.topNX.merge(other._dns.at(dns.first).topNX); + dns.second.topREFUSED.merge(other._dns.at(dns.first).topREFUSED); + dns.second.topSRVFAIL.merge(other._dns.at(dns.first).topSRVFAIL); + dns.second.topNODATA.merge(other._dns.at(dns.first).topNODATA); + dns.second.topNOERROR.merge(other._dns.at(dns.first).topNOERROR); + dns.second.topRCode.merge(other._dns.at(dns.first).topRCode); } if (group_enabled(group::DnsMetrics::TopQnames)) { - _dns.at(dns.first).topQname2.merge(dns.second.topQname2); - _dns.at(dns.first).topQname3.merge(dns.second.topQname3); + dns.second.topQname2.merge(other._dns.at(dns.first).topQname2); + dns.second.topQname3.merge(other._dns.at(dns.first).topQname3); } if (group_enabled(group::DnsMetrics::TopSize)) { - _dns.at(dns.first).topSizedQnameResp.merge(dns.second.topSizedQnameResp); - _dns.at(dns.first).dnsRatio.merge(dns.second.dnsRatio, agg_operator); + dns.second.topSizedQnameResp.merge(other._dns.at(dns.first).topSizedQnameResp); + dns.second.dnsRatio.merge(other._dns.at(dns.first).dnsRatio, agg_operator); } - 
group_enabled(group::DnsMetrics::TopPorts) ? _dns.at(dns.first).topUDPPort.merge(dns.second.topUDPPort) : void(); - group_enabled(group::DnsMetrics::TopQtypes) ? _dns.at(dns.first).topQType.merge(dns.second.topQType) : void(); + group_enabled(group::DnsMetrics::TopPorts) ? dns.second.topUDPPort.merge(other._dns.at(dns.first).topUDPPort) : void(); + group_enabled(group::DnsMetrics::TopQtypes) ? dns.second.topQType.merge(other._dns.at(dns.first).topQType) : void(); if (group_enabled(group::DnsMetrics::XactTimes)) { - _dns.at(dns.first).dnsTimeUs.merge(dns.second.dnsTimeUs, agg_operator); - _dns.at(dns.first).dnsHistTimeUs.merge(dns.second.dnsHistTimeUs); - _dns.at(dns.first).topSlow.merge(dns.second.topSlow); + dns.second.dnsTimeUs.merge(other._dns.at(dns.first).dnsTimeUs, agg_operator); + dns.second.topSlow.merge(other._dns.at(dns.first).topSlow); } } } @@ -751,7 +640,6 @@ void DnsMetricsBucket::to_json(json &j) const if (group_enabled(group::DnsMetrics::XactTimes)) { dns.second.dnsTimeUs.to_json(j[_dir_str.at(dns.first)]); - dns.second.dnsHistTimeUs.to_json(j[_dir_str.at(dns.first)]); dns.second.topSlow.to_json(j[_dir_str.at(dns.first)]); } } @@ -834,95 +722,11 @@ void DnsMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap ad if (group_enabled(group::DnsMetrics::XactTimes)) { dns.second.dnsTimeUs.to_prometheus(out, dir_labels); - dns.second.dnsHistTimeUs.to_prometheus(out, dir_labels); dns.second.topSlow.to_prometheus(out, dir_labels); } } } -void DnsMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels) const -{ - for (auto &dns : _dns) { - auto dir_labels = add_labels; - dir_labels["direction"] = _dir_str.at(dns.first); - group_enabled(group::DnsMetrics::Quantiles) ? 
dns.second.dnsRate.to_opentelemetry(scope, start_ts, end_ts, dir_labels) : void(); - } - - { - auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - - event_rate->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_events->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_samples->to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - std::shared_lock r_lock(_mutex); - - group_enabled(group::DnsMetrics::Counters) ? _filtered.to_opentelemetry(scope, start_ts, end_ts, add_labels) : void(); - - for (auto &dns : _dns) { - auto dir_labels = add_labels; - dir_labels["direction"] = _dir_str.at(dns.first); - - group_enabled(group::DnsMetrics::Counters) ? dns.second.counters.to_opentelemetry(scope, start_ts, end_ts, dir_labels) : void(); - group_enabled(group::DnsMetrics::Cardinality) ? dns.second.qnameCard.to_opentelemetry(scope, start_ts, end_ts, dir_labels) : void(); - group_enabled(group::DnsMetrics::TopPorts) ? dns.second.topUDPPort.to_opentelemetry(scope, start_ts, end_ts, dir_labels, [](const uint16_t &val) { return std::to_string(val); }) : void(); - - if (group_enabled(group::DnsMetrics::TopEcs)) { - dns.second.topGeoLocECS.to_opentelemetry(scope, start_ts, end_ts, dir_labels, [](Metric::LabelMap &l, const std::string &key, const visor::geo::City &val) { - l[key] = val.location; - if (!val.latitude.empty() && !val.longitude.empty()) { - l["lat"] = val.latitude; - l["lon"] = val.longitude; - } - }); - dns.second.topASNECS.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - dns.second.topQueryECS.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - } - - if (group_enabled(group::DnsMetrics::TopRcodes)) { - dns.second.topNX.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - dns.second.topREFUSED.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - dns.second.topSRVFAIL.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - dns.second.topNODATA.to_opentelemetry(scope, start_ts, 
end_ts, dir_labels); - dns.second.topNOERROR.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - dns.second.topRCode.to_opentelemetry(scope, start_ts, end_ts, dir_labels, [](const uint16_t &val) { - if (RCodeNames.find(val) != RCodeNames.end()) { - return RCodeNames[val]; - } else { - return std::to_string(val); - } - }); - } - - if (group_enabled(group::DnsMetrics::TopQnames)) { - dns.second.topQname2.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - dns.second.topQname3.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - } - - if (group_enabled(group::DnsMetrics::TopSize)) { - dns.second.topSizedQnameResp.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - dns.second.dnsRatio.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - } - - if (group_enabled(group::DnsMetrics::TopQtypes)) { - dns.second.topQType.to_opentelemetry(scope, start_ts, end_ts, dir_labels, [](const uint16_t &val) { - if (QTypeNames.find(val) != QTypeNames.end()) { - return QTypeNames[val]; - } else { - return std::to_string(val); - } - }); - } - - if (group_enabled(group::DnsMetrics::XactTimes)) { - dns.second.dnsTimeUs.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - dns.second.dnsHistTimeUs.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - dns.second.topSlow.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - } - } -} - void DnsMetricsBucket::new_dns_transaction(bool deep, float per90th, DnsLayer &payload, TransactionDirection dir, DnsTransaction xact, pcpp::ProtocolType l3, Protocol l4, uint16_t port, size_t suffix_size) { @@ -1015,10 +819,7 @@ void DnsMetricsBucket::new_dns_transaction(bool deep, float per90th, DnsLayer &p data.topUDPPort.update(port); } - if (group_enabled(group::DnsMetrics::XactTimes)) { - data.dnsTimeUs.update(xactTime); - data.dnsHistTimeUs.update(xactTime); - } + group_enabled(group::DnsMetrics::XactTimes) ? 
data.dnsTimeUs.update(xactTime) : void(); auto success = payload.parseResources(true); if (!success) { @@ -1111,7 +912,7 @@ void DnsMetricsManager::process_dns_layer(DnsLayer &payload, PacketDirection dir } else if (dir == PacketDirection::fromHost) { xact_dir = TransactionDirection::in; } - auto xact = _pair_manager[xact_dir].xact_map->maybe_end_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), stamp); + auto xact = _pair_manager[xact_dir].xact_map.maybe_end_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), stamp); live_bucket()->dir_setup(xact_dir); if (xact.first == Result::Valid && !xact.second.filtered) { live_bucket()->new_dns_transaction(_deep_sampling_now, _pair_manager[xact_dir].per_90th, payload, xact_dir, xact.second, l3, static_cast(l4), port, suffix_size); @@ -1140,7 +941,7 @@ void DnsMetricsManager::process_dns_layer(DnsLayer &payload, PacketDirection dir subnet = ecs->client_subnet; } } - _pair_manager[xact_dir].xact_map->start_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), + _pair_manager[xact_dir].xact_map.start_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), {{stamp, {0, 0}}, false, payload.getDataLen(), static_cast(payload.getDnsHeader()->checkingDisabled), subnet}); } } @@ -1158,7 +959,7 @@ void DnsMetricsManager::process_filtered(timespec stamp, DnsLayer &payload, Pack } else if (dir == PacketDirection::fromHost) { xact_dir = TransactionDirection::in; } - auto xact = _pair_manager[xact_dir].xact_map->maybe_end_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), stamp); + auto xact = _pair_manager[xact_dir].xact_map.maybe_end_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), stamp); live_bucket()->dir_setup(xact_dir); if (xact.first == Result::Valid && !xact.second.filtered) { live_bucket()->process_filtered(); @@ -1169,7 +970,7 @@ void DnsMetricsManager::process_filtered(timespec stamp, DnsLayer &payload, Pack } else 
if (dir == PacketDirection::fromHost) { xact_dir = TransactionDirection::out; } - _pair_manager[xact_dir].xact_map->start_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), {{stamp, {0, 0}}, true, 0, false, std::string()}); + _pair_manager[xact_dir].xact_map.start_transaction(DnsXactID(flowkey, payload.getDnsHeader()->transactionID), {{stamp, {0, 0}}, true, 0, false, std::string()}); } live_bucket()->process_filtered(); } @@ -1260,7 +1061,7 @@ void DnsMetricsManager::process_dnstap(const dnstap::Dnstap &payload, bool filte uint8_t *buf = new uint8_t[query.size()]; std::memcpy(buf, query.c_str(), query.size()); DnsLayer dpayload(buf, query.size(), nullptr, nullptr); - auto xact = _pair_manager[xact_dir].xact_map->maybe_end_transaction(DnsXactID(dpayload.getDnsHeader()->transactionID, 2), stamp); + auto xact = _pair_manager[xact_dir].xact_map.maybe_end_transaction(DnsXactID(dpayload.getDnsHeader()->transactionID, 2), stamp); live_bucket()->dir_setup(xact_dir); if (xact.first == Result::Valid) { // process in the "live" bucket. 
this will parse the resources if we are deep sampling @@ -1275,7 +1076,7 @@ void DnsMetricsManager::process_dnstap(const dnstap::Dnstap &payload, bool filte uint8_t *buf = new uint8_t[query.size()]; std::memcpy(buf, query.c_str(), query.size()); DnsLayer dpayload(buf, query.size(), nullptr, nullptr); - _pair_manager[xact_dir].xact_map->start_transaction(DnsXactID(dpayload.getDnsHeader()->transactionID, 2), {{stamp, {0, 0}}, false, payload.message().query_message().size(), false, std::string()}); + _pair_manager[xact_dir].xact_map.start_transaction(DnsXactID(dpayload.getDnsHeader()->transactionID, 2), {{stamp, {0, 0}}, false, payload.message().query_message().size(), false, std::string()}); } } } diff --git a/src/handlers/dns/v2/DnsStreamHandler.h b/src/handlers/dns/v2/DnsStreamHandler.h index 1bd18e5c7..a5c067aed 100644 --- a/src/handlers/dns/v2/DnsStreamHandler.h +++ b/src/handlers/dns/v2/DnsStreamHandler.h @@ -106,12 +106,12 @@ struct DnsDirection { , DOQ(DNS_SCHEMA, {"doq_xacts"}, "Total DNS transactions (query/reply pairs) received over DNS over QUIC") , IPv4(DNS_SCHEMA, {"ipv4_xacts"}, "Total DNS transactions (query/reply pairs) received over IPv4") , IPv6(DNS_SCHEMA, {"ipv6_xacts"}, "Total DNS transactions (query/reply pairs) received over IPv6") - , NX(DNS_SCHEMA, {"nxdomain_xacts"}, "Total DNS transactions (query/reply pairs) flagged as reply with response code NXDOMAIN") + , NX(DNS_SCHEMA, {"nxdomain_xacts"}, "Total DNS transactions (query/reply pairs) flagged as reply with return code NXDOMAIN") , ECS(DNS_SCHEMA, {"ecs_xacts"}, "Total DNS transactions (query/reply pairs) with the EDNS Client Subnet option set") - , REFUSED(DNS_SCHEMA, {"refused_xacts"}, "Total DNS transactions (query/reply pairs) flagged as reply with response code REFUSED") - , SRVFAIL(DNS_SCHEMA, {"srvfail_xacts"}, "Total DNS transactions (query/reply pairs) flagged as reply with response code SRVFAIL") - , RNOERROR(DNS_SCHEMA, {"noerror_xacts"}, "Total DNS transactions (query/reply 
pairs) flagged as reply with response code NOERROR") - , NODATA(DNS_SCHEMA, {"nodata_xacts"}, "Total DNS transactions (query/reply pairs) flagged as reply with response code NOERROR but with an empty answers section") + , REFUSED(DNS_SCHEMA, {"refused_xacts"}, "Total DNS transactions (query/reply pairs) flagged as reply with return code REFUSED") + , SRVFAIL(DNS_SCHEMA, {"srvfail_xacts"}, "Total DNS transactions (query/reply pairs) flagged as reply with return code SRVFAIL") + , RNOERROR(DNS_SCHEMA, {"noerror_xacts"}, "Total DNS transactions (query/reply pairs) flagged as reply with return code NOERROR") + , NODATA(DNS_SCHEMA, {"nodata_xacts"}, "Total DNS transactions (query/reply pairs) flagged as reply with return code NOERROR but with an empty answers section") , authData(DNS_SCHEMA, {"authenticated_data_xacts"}, "Total DNS transactions (query/reply pairs) with the AD flag set in the response") , authAnswer(DNS_SCHEMA, {"authoritative_answer_xacts"}, "Total DNS transactions (query/reply pairs) with the AA flag set in the response") , checkDisabled(DNS_SCHEMA, {"checking_disabled_xacts"}, "Total DNS transactions (query/reply pairs) with the CD flag set in the query") @@ -194,36 +194,10 @@ struct DnsDirection { timeout.to_prometheus(out, add_labels); orphan.to_prometheus(out, add_labels); } - - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, Metric::LabelMap add_labels) const - { - xacts.to_opentelemetry(scope, start, end, add_labels); - UDP.to_opentelemetry(scope, start, end, add_labels); - TCP.to_opentelemetry(scope, start, end, add_labels); - DOT.to_opentelemetry(scope, start, end, add_labels); - DOH.to_opentelemetry(scope, start, end, add_labels); - cryptUDP.to_opentelemetry(scope, start, end, add_labels); - cryptTCP.to_opentelemetry(scope, start, end, add_labels); - DOQ.to_opentelemetry(scope, start, end, add_labels); - IPv4.to_opentelemetry(scope, start, end, add_labels); - IPv6.to_opentelemetry(scope, start, end, 
add_labels); - NX.to_opentelemetry(scope, start, end, add_labels); - ECS.to_opentelemetry(scope, start, end, add_labels); - REFUSED.to_opentelemetry(scope, start, end, add_labels); - SRVFAIL.to_opentelemetry(scope, start, end, add_labels); - RNOERROR.to_opentelemetry(scope, start, end, add_labels); - NODATA.to_opentelemetry(scope, start, end, add_labels); - authData.to_opentelemetry(scope, start, end, add_labels); - authAnswer.to_opentelemetry(scope, start, end, add_labels); - checkDisabled.to_opentelemetry(scope, start, end, add_labels); - timeout.to_opentelemetry(scope, start, end, add_labels); - orphan.to_opentelemetry(scope, start, end, add_labels); - } }; Counters counters; Quantile dnsTimeUs; - Histogram dnsHistTimeUs; Quantile dnsRatio; Rate dnsRate; @@ -248,13 +222,12 @@ struct DnsDirection { DnsDirection() : counters() , dnsTimeUs(DNS_SCHEMA, {"xact_time_us"}, "Quantiles of transaction timing (query/reply pairs) in microseconds") - , dnsHistTimeUs(DNS_SCHEMA, {"xact_histogram_us"}, "Histogram of transaction timing (query/reply pairs) in microseconds") , dnsRatio(DNS_SCHEMA, {"response_query_size_ratio"}, "Quantiles of ratio of packet sizes in a DNS transaction (reply/query)") - , dnsRate(DNS_SCHEMA, {"xact_rates"}, "Rate of all DNS transaction (reply/query) per second") + , dnsRate(DNS_SCHEMA, {"dns_xact_rates"}, "Rate of all DNS transaction (reply/query) per second") , qnameCard(DNS_SCHEMA, {"cardinality", "qname"}, "Cardinality of unique QNAMES, both ingress and egress") , topGeoLocECS(DNS_SCHEMA, "geo_loc", {"top_geo_loc_ecs_xacts"}, "Top GeoIP ECS locations") , topASNECS(DNS_SCHEMA, "asn", {"top_asn_ecs_xacts"}, "Top ASNs by ECS") - , topQueryECS(DNS_SCHEMA, "ecs", {"top_ecs_xacts"}, "Top EDNS Client Subnet (ECS) observed in DNS transaction") + , topQueryECS(DNS_SCHEMA, "ecs", {"top_query_ecs_xacts"}, "Top EDNS Client Subnet (ECS) observed in DNS queries") , topQname2(DNS_SCHEMA, "qname", {"top_qname2_xacts"}, "Top QNAMES, aggregated at a depth of two 
labels") , topQname3(DNS_SCHEMA, "qname", {"top_qname3_xacts"}, "Top QNAMES, aggregated at a depth of three labels") , topNX(DNS_SCHEMA, "qname", {"top_nxdomain_xacts"}, "Top QNAMES with result code NXDOMAIN") @@ -307,7 +280,6 @@ class DnsMetricsBucket final : public visor::AbstractMetricsBucket DnsMetricsBucket() : _filtered(DNS_SCHEMA, {"filtered_packets"}, "Total DNS wire packets seen that did not match the configured filter(s) (if any)") { - set_event_rate_info(DNS_SCHEMA, {"rates", "observed_pps"}, "Rate of all DNS wire packets before filtering per second"); set_num_events_info(DNS_SCHEMA, {"observed_packets"}, "Total DNS wire packets events"); set_num_sample_info(DNS_SCHEMA, {"deep_sampled_packets"}, "Total DNS wire packets that were sampled for deep inspection"); } @@ -362,7 +334,6 @@ class DnsMetricsBucket final : public visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const override; void update_topn_metrics(size_t topn_count, uint64_t percentile_threshold) override { _topn_count = topn_count; @@ -384,42 +355,31 @@ class DnsMetricsBucket final : public visor::AbstractMetricsBucket class DnsMetricsManager final : public visor::AbstractMetricsManager { using DnsXactID = std::pair; - typedef TransactionManager DnsTransactionManager; struct DirTransaction { - std::unique_ptr xact_map; + TransactionManager xact_map; float per_90th{0.0}; - - DirTransaction() - : xact_map(std::make_unique()) - { - } - DirTransaction(uint32_t ttl) - : xact_map(std::make_unique(ttl)) - { - } }; - std::map _pair_manager; + std::map _pair_manager = {{TransactionDirection::in, DirTransaction()}, + {TransactionDirection::out, DirTransaction()}, + 
{TransactionDirection::unknown, DirTransaction()}}; public: DnsMetricsManager(const Configurable *window_config) : visor::AbstractMetricsManager(window_config) { - _pair_manager[TransactionDirection::in] = DirTransaction(); - _pair_manager[TransactionDirection::out] = DirTransaction(); - _pair_manager[TransactionDirection::unknown] = DirTransaction(); } void on_period_shift(timespec stamp, [[maybe_unused]] const DnsMetricsBucket *maybe_expiring_bucket) override { // DNS transaction support for (auto &manager : _pair_manager) { - if (auto timed_out = manager.second.xact_map->purge_old_transactions(stamp); timed_out) { - live_bucket()->dir_setup(manager.first); + if (auto timed_out = manager.second.xact_map.purge_old_transactions(stamp); timed_out && live_bucket()->has_dir(manager.first)) { live_bucket()->inc_xact_timed_out(timed_out, manager.first); } if (bucket(1)->has_dir(manager.first)) { - auto [xact, lock] = bucket(1)->get_xact_data_locked(manager.first); - xact.get_n() ? manager.second.per_90th = xact.get_quantile(0.90) : float(); + if (auto [xact, lock] = bucket(1)->get_xact_data_locked(manager.first); xact.get_n()) { + manager.second.per_90th = xact.get_quantile(0.90); + } } } } @@ -428,18 +388,11 @@ class DnsMetricsManager final : public visor::AbstractMetricsManageropen_transaction_count(); + count += manager.second.xact_map.open_transaction_count(); } return count; } - void set_xact_ttl(uint32_t ttl) - { - _pair_manager[TransactionDirection::in] = DirTransaction(ttl); - _pair_manager[TransactionDirection::out] = DirTransaction(ttl); - _pair_manager[TransactionDirection::unknown] = DirTransaction(ttl); - } - void process_filtered(timespec stamp) { new_event(stamp, false); @@ -487,7 +440,6 @@ class DnsStreamHandler final : public visor::StreamMetricsHandler _c_enabled; - std::vector _f_rcodes; + uint16_t _f_rcode{0}; uint64_t _f_answer_count{0}; - std::vector _f_qnames_suffix; std::vector _f_qnames; std::vector _f_qtypes; size_t _static_suffix_size{0}; @@ 
-552,15 +501,12 @@ class DnsStreamHandler final : public visor::StreamMetricsHandler #include #include #include +#include #include #ifdef __GNUC__ #pragma GCC diagnostic pop @@ -126,39 +127,6 @@ TEST_CASE("Parse DNS TCP IPv4 tests", "[pcap][ipv4][tcp][dns]") CHECK(j["unknown"]["top_qname2_xacts"][0]["estimate"] == 210); } -TEST_CASE("Parse DNS TCP tests with limit", "[pcap][ipv4][tcp][dns]") -{ - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_ipv4_tcp.pcap"); - stream.config_set("bpf", ""); - stream.config_set("tcp_packet_reassembly_cache_limit", 10); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - - dns_handler.start(); - stream.start(); - dns_handler.stop(); - stream.stop(); - - auto counters = dns_handler.metrics()->bucket(0)->counters(TransactionDirection::unknown); - auto event_data = dns_handler.metrics()->bucket(0)->event_data_locked(); - json j; - dns_handler.metrics()->bucket(0)->to_json(j); - - CHECK(event_data.num_events->value() == 140); - CHECK(counters.TCP.value() == 70); - CHECK(counters.IPv4.value() == 70); - CHECK(counters.IPv6.value() == 0); - CHECK(counters.xacts.value() == 70); - CHECK(counters.timeout.value() == 0); - CHECK(counters.orphan.value() == 0); - CHECK(j["unknown"]["top_qname2_xacts"][0]["name"] == ".test.com"); - CHECK(j["unknown"]["top_qname2_xacts"][0]["estimate"] == 70); -} - TEST_CASE("Parse DNS UDP IPv6 tests", "[pcap][ipv6][udp][dns]") { @@ -363,7 +331,7 @@ TEST_CASE("DNS Filters: only_rcode nx", "[pcap][net]") REQUIRE(j["filtered_packets"] == 19); } -TEST_CASE("DNS Filters: only_rcode refused and nx", "[pcap][dns]") +TEST_CASE("DNS Filters: only_rcode refused", "[pcap][dns]") { PcapInputStream stream{"pcap-test"}; @@ -377,7 +345,7 @@ TEST_CASE("DNS Filters: only_rcode refused and nx", "[pcap][dns]") c.config_set("num_periods", 1); DnsStreamHandler 
dns_handler{"dns-test", stream_proxy, &c}; - dns_handler.config_set("only_rcode", {"nxdomain", "5"}); + dns_handler.config_set("only_rcode", Refused); dns_handler.start(); stream.start(); @@ -388,11 +356,11 @@ TEST_CASE("DNS Filters: only_rcode refused and nx", "[pcap][dns]") REQUIRE(counters.RNOERROR.value() == 0); REQUIRE(counters.SRVFAIL.value() == 0); REQUIRE(counters.REFUSED.value() == 1); - REQUIRE(counters.NX.value() == 1); + REQUIRE(counters.NX.value() == 0); REQUIRE(counters.NODATA.value() == 0); nlohmann::json j; dns_handler.metrics()->bucket(0)->to_json(j); - REQUIRE(j["filtered_packets"] == 17); + REQUIRE(j["filtered_packets"] == 19); } TEST_CASE("DNS Filters: only_qtypes AAAA and TXT", "[pcap][dns]") { @@ -408,7 +376,7 @@ TEST_CASE("DNS Filters: only_qtypes AAAA and TXT", "[pcap][dns]") c.config_set("num_periods", 1); DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - // notice case-insensitive + // notice case insensitive dns_handler.config_set("only_qtype", {"AAAA", "TxT"}); dns_handler.start(); stream.start(); @@ -474,46 +442,6 @@ TEST_CASE("DNS TopN custom size", "[pcap][dns]") CHECK(j["out"]["top_qtype_xacts"][3] == nullptr); } -TEST_CASE("DNS Filters: only_qname", "[pcap][dns]") -{ - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_mixed_rcode.pcap"); - stream.config_set("bpf", ""); - stream.config_set("host_spec", "192.168.0.0/24"); - stream.parse_host_spec(); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - - // notice, case-insensitive - dns_handler.config_set("only_qname", {"play.GooGle.com", "nonexistent.google.com"}); - dns_handler.start(); - stream.start(); - stream.stop(); - dns_handler.stop(); - - auto counters = dns_handler.metrics()->bucket(0)->counters(TransactionDirection::out); - - CHECK(counters.UDP.value() == 2); - CHECK(counters.RNOERROR.value() == 1); - 
CHECK(counters.SRVFAIL.value() == 0); - CHECK(counters.REFUSED.value() == 0); - CHECK(counters.NX.value() == 1); - CHECK(counters.NODATA.value() == 1); - CHECK(counters.xacts.value() == 2); - CHECK(counters.timeout.value() == 0); - CHECK(counters.orphan.value() == 3); - - nlohmann::json j; - dns_handler.metrics()->bucket(0)->to_json(j); - - CHECK(j["out"]["top_qname2_xacts"][0]["name"] == ".google.com"); - CHECK(j["out"]["top_qname3_xacts"][0]["name"] != nullptr); -} - TEST_CASE("DNS Filters: only_qname_suffix", "[pcap][dns]") { @@ -528,7 +456,7 @@ TEST_CASE("DNS Filters: only_qname_suffix", "[pcap][dns]") c.config_set("num_periods", 1); DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - // notice, case-insensitive + // notice, case insensitive dns_handler.config_set("only_qname_suffix", {"GooGle.com"}); dns_handler.start(); stream.start(); @@ -545,14 +473,15 @@ TEST_CASE("DNS Filters: only_qname_suffix", "[pcap][dns]") CHECK(counters.NODATA.value() == 1); CHECK(counters.xacts.value() == 4); CHECK(counters.timeout.value() == 0); - CHECK(counters.orphan.value() == 3); + CHECK(counters.orphan.value() == 1); nlohmann::json j; dns_handler.metrics()->bucket(0)->to_json(j); - REQUIRE(j["filtered_packets"] == 12); + REQUIRE(j["filtered_packets"] == 14); CHECK(j["out"]["top_qname2_xacts"][0]["name"].get().find("google.com") != std::string::npos); + CHECK(j["out"]["top_qname3_xacts"][0]["name"] == nullptr); } TEST_CASE("DNS Filters: answer_count", "[pcap][dns]") @@ -654,7 +583,7 @@ TEST_CASE("DNS Configs: public_suffix_list", "[pcap][dns]") c.config_set("num_periods", 1); DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - // notice, case-insensitive + // notice, case insensitive dns_handler.config_set("public_suffix_list", true); dns_handler.start(); stream.start(); @@ -719,9 +648,9 @@ TEST_CASE("Parse DNS with ECS data", "[pcap][dns][ecs]") CHECK(j["unknown"]["cardinality"]["qname"] == 8); - CHECK(j["unknown"]["top_ecs_xacts"][0]["name"] == 
"2001:470:1f0b:1600::"); // wireshark - CHECK(j["unknown"]["top_ecs_xacts"][0]["estimate"] == 2); - CHECK(j["unknown"]["top_ecs_xacts"][1] == nullptr); + CHECK(j["unknown"]["top_query_ecs_xacts"][0]["name"] == "2001:470:1f0b:1600::"); // wireshark + CHECK(j["unknown"]["top_query_ecs_xacts"][0]["estimate"] == 2); + CHECK(j["unknown"]["top_query_ecs_xacts"][1] == nullptr); CHECK(j["unknown"]["top_geo_loc_ecs_xacts"][0]["name"] == "Unknown"); CHECK(j["unknown"]["top_geo_loc_ecs_xacts"][0]["estimate"] == 2); CHECK(j["unknown"]["top_asn_ecs_xacts"][0]["name"] == "Unknown"); @@ -761,19 +690,19 @@ TEST_CASE("DNS filter: GeoLoc not found", "[pcap][dns][ecs]") CHECK(counters.IPv6.value() == 2); CHECK(counters.xacts.value() == 2); CHECK(counters.timeout.value() == 0); - CHECK(counters.orphan.value() == 2); + CHECK(counters.orphan.value() == 0); CHECK(counters.ECS.value() == 2); nlohmann::json j; dns_handler.metrics()->bucket(0)->to_json(j); - CHECK(j["filtered_packets"] == 27); + CHECK(j["filtered_packets"] == 29); CHECK(j["unknown"]["cardinality"]["qname"] == 1); - CHECK(j["unknown"]["top_ecs_xacts"][0]["name"] == "2001:470:1f0b:1600::"); // wireshark - CHECK(j["unknown"]["top_ecs_xacts"][0]["estimate"] == 2); - CHECK(j["unknown"]["top_ecs_xacts"][1] == nullptr); + CHECK(j["unknown"]["top_query_ecs_xacts"][0]["name"] == "2001:470:1f0b:1600::"); // wireshark + CHECK(j["unknown"]["top_query_ecs_xacts"][0]["estimate"] == 2); + CHECK(j["unknown"]["top_query_ecs_xacts"][1] == nullptr); CHECK(j["unknown"]["top_geo_loc_ecs_xacts"][0]["name"] == "Unknown"); CHECK(j["unknown"]["top_geo_loc_ecs_xacts"][0]["estimate"] == 2); } @@ -811,19 +740,19 @@ TEST_CASE("DNS filter: ASN not found", "[pcap][dns][ecs]") CHECK(counters.IPv6.value() == 2); CHECK(counters.xacts.value() == 2); CHECK(counters.timeout.value() == 0); - CHECK(counters.orphan.value() == 2); + CHECK(counters.orphan.value() == 0); CHECK(counters.ECS.value() == 2); nlohmann::json j; 
dns_handler.metrics()->bucket(0)->to_json(j); - CHECK(j["filtered_packets"] == 27); + CHECK(j["filtered_packets"] == 29); CHECK(j["unknown"]["cardinality"]["qname"] == 1); - CHECK(j["unknown"]["top_ecs_xacts"][0]["name"] == "2001:470:1f0b:1600::"); // wireshark - CHECK(j["unknown"]["top_ecs_xacts"][0]["estimate"] == 2); - CHECK(j["unknown"]["top_ecs_xacts"][1] == nullptr); + CHECK(j["unknown"]["top_query_ecs_xacts"][0]["name"] == "2001:470:1f0b:1600::"); // wireshark + CHECK(j["unknown"]["top_query_ecs_xacts"][0]["estimate"] == 2); + CHECK(j["unknown"]["top_query_ecs_xacts"][1] == nullptr); CHECK(j["unknown"]["top_asn_ecs_xacts"][0]["name"] == "Unknown"); CHECK(j["unknown"]["top_asn_ecs_xacts"][0]["estimate"] == 2); } @@ -844,7 +773,7 @@ TEST_CASE("DNS filter exceptions", "[pcap][dns][filter]") SECTION("only_rcode as string") { dns_handler.config_set("only_rcode", "1"); - REQUIRE_THROWS_WITH(dns_handler.start(), "DnsStreamHandler: wrong value type for only_rcode filter. It should be an integer or an array"); + REQUIRE_THROWS_WITH(dns_handler.start(), "DnsStreamHandler: wrong value type for only_rcode filter. 
It should be an integer"); } SECTION("only_rcode invalid") @@ -888,7 +817,7 @@ TEST_CASE("DNS groups", "[pcap][dns]") SECTION("disable cardinality and counters") { dns_handler.config_set("disable", {"cardinality", "counters"}); - dns_handler.config_set("enable", {"top_size", "xact_times"}); + dns_handler.config_set("enable", {"top_size"}); dns_handler.start(); stream.start(); @@ -914,8 +843,6 @@ TEST_CASE("DNS groups", "[pcap][dns]") CHECK(j["out"]["cardinality"]["qname"] == nullptr); CHECK(j["out"]["top_qname2_xacts"][0]["name"] == ".test.com"); CHECK(j["out"]["response_query_size_ratio"]["p50"] != nullptr); - CHECK(j["out"]["xact_time_us"]["p50"] != nullptr); - CHECK(j["out"]["xact_histogram_us"]["buckets"] != nullptr); } SECTION("disable TopQname and Dns Transactions") @@ -970,5 +897,5 @@ TEST_CASE("DNS invalid config", "[dns][filter][config]") c.config_set("num_periods", 1); DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; dns_handler.config_set("invalid_config", true); - REQUIRE_THROWS_WITH(dns_handler.start(), "invalid_config is an invalid/unsupported config or filter. The valid configs/filters are: exclude_noerror, only_rcode, only_dnssec_response, answer_count, only_qtype, only_qname, only_qname_suffix, geoloc_notfound, asn_notfound, dnstap_msg_type, public_suffix_list, recorded_stream, xact_ttl_secs, xact_ttl_ms, deep_sample_rate, num_periods, topn_count, topn_percentile_threshold"); + REQUIRE_THROWS_WITH(dns_handler.start(), "invalid_config is an invalid/unsupported config or filter. 
The valid configs/filters are: exclude_noerror, only_rcode, only_dnssec_response, answer_count, only_qtype, only_qname_suffix, geoloc_notfound, asn_notfound, dnstap_msg_type, public_suffix_list, recorded_stream, deep_sample_rate, num_periods, topn_count, topn_percentile_threshold"); } diff --git a/src/handlers/flow/FlowStreamHandler.cpp b/src/handlers/flow/FlowStreamHandler.cpp index aa90bcbba..7bfd10a96 100644 --- a/src/handlers/flow/FlowStreamHandler.cpp +++ b/src/handlers/flow/FlowStreamHandler.cpp @@ -4,7 +4,6 @@ #include "FlowStreamHandler.h" #include "HandlerModulePlugin.h" -#include "Tos.h" #include #include #ifdef _WIN32 @@ -15,58 +14,41 @@ namespace visor::handler::flow { -static std::string ip_summarization(const pcpp::IPAddress &ip, SummaryData *summary) +static std::string ip_summarization(const std::string &val, SummaryData *summary) { - if (summary && summary->type != IpSummary::None) { - if (ip.isIPv4() && match_subnet(summary->ipv4_exclude_summary, ip.getIPv4().toInt()).has_value()) { - return ip.toString(); - } else if (ip.isIPv6() && match_subnet(summary->ipv6_exclude_summary, ip.getIPv6().toBytes()).has_value()) { - return ip.toString(); + if (summary) { + pcpp::IPv4Address ipv4; + pcpp::IPv6Address ipv6; + if (ipv4 = pcpp::IPv4Address(val); ipv4.isValid() && match_subnet(summary->ipv4_exclude_summary, ipv4.toInt()).first) { + return val; + } else if (ipv6 = pcpp::IPv6Address(val); ipv6.isValid() && match_subnet(summary->ipv6_exclude_summary, ipv6.toBytes()).first) { + return val; } - bool check_subnet{false}; if (summary->type == IpSummary::ByASN && HandlerModulePlugin::asn->enabled()) { - std::string asn; - if (ip.isIPv4()) { + if (ipv4.isValid()) { sockaddr_in sa4{}; - if (lib::utils::ipv4_to_sockaddr(ip.getIPv4(), &sa4)) { - asn = HandlerModulePlugin::asn->getASNString(&sa4); + if (lib::utils::ipv4_to_sockaddr(ipv4, &sa4)) { + return HandlerModulePlugin::asn->getASNString(&sa4); } - } else if (ip.isIPv6()) { + } else if (ipv6.isValid()) { 
sockaddr_in6 sa6{}; - if (lib::utils::ipv6_to_sockaddr(ip.getIPv6(), &sa6)) { - asn = HandlerModulePlugin::asn->getASNString(&sa6); + if (lib::utils::ipv6_to_sockaddr(ipv6, &sa6)) { + return HandlerModulePlugin::asn->getASNString(&sa6); } } - if (summary->exclude_unknown_asns && asn == "Unknown") { - check_subnet = true; - } else if (!summary->asn_exclude_summary.empty() && std::any_of(summary->asn_exclude_summary.begin(), summary->asn_exclude_summary.end(), [&asn](const auto &prefix) { - return asn.size() >= prefix.size() && 0 == asn.compare(0, prefix.size(), prefix); - })) { - check_subnet = true; - } - if (!check_subnet) { - return asn; - } - } - if (summary->type == IpSummary::BySubnet || check_subnet) { - if (ip.isIPv4()) { - if (auto subnet = match_subnet(summary->ipv4_summary, ip.getIPv4().toInt()); subnet.has_value()) { - return subnet.value()->str; - } else if (summary->ipv4_wildcard.has_value()) { - auto cidr = summary->ipv4_wildcard.value().cidr; - return pcpp::IPv4Address(lib::utils::get_subnet(ip.getIPv4().toInt(), cidr)).toString() + "/" + std::to_string(cidr); - } - } else if (ip.isIPv6()) { - if (auto subnet = match_subnet(summary->ipv6_summary, ip.getIPv6().toBytes()); subnet.has_value()) { - return subnet.value()->str; - } else if (summary->ipv6_wildcard.has_value()) { - auto cidr = summary->ipv6_wildcard.value().cidr; - return pcpp::IPv6Address(lib::utils::get_subnet(ip.getIPv6().toBytes(), cidr).data()).toString() + "/" + std::to_string(cidr); + } else if (summary->type == IpSummary::BySubnet) { + if (ipv4.isValid()) { + if (auto [match, subnet] = match_subnet(summary->ipv4_summary, ipv4.toInt()); match) { + return subnet->str; + } + } else if (ipv6.isValid()) { + if (auto [match, subnet] = match_subnet(summary->ipv6_summary, ipv6.toBytes()); match) { + return subnet->str; } } } } - return ip.toString(); + return val; } FlowStreamHandler::FlowStreamHandler(const std::string &name, InputEventProxy *proxy, const Configurable *window_config) @@ 
-148,63 +130,18 @@ void FlowStreamHandler::start() } SummaryData summary_data; - if (config_exists("exclude_ips_from_summarization")) { - parse_host_specs(config_get("exclude_ips_from_summarization"), summary_data.ipv4_exclude_summary, summary_data.ipv6_exclude_summary); - } if (config_exists("summarize_ips_by_asn") && config_get("summarize_ips_by_asn")) { summary_data.type = IpSummary::ByASN; - if (config_exists("exclude_asns_from_summarization")) { - for (const auto &asn : config_get("exclude_asns_from_summarization")) { - summary_data.asn_exclude_summary.push_back(asn + "/"); - } - } - if (config_exists("exclude_unknown_asns_from_summarization")) { - summary_data.exclude_unknown_asns = config_get("exclude_unknown_asns_from_summarization"); - } - } - if (config_exists("subnets_for_summarization")) { - if (summary_data.type == IpSummary::None) { - summary_data.type = IpSummary::BySubnet; + if (config_exists("exclude_ips_from_summarization")) { + parse_host_specs(config_get("exclude_ips_from_summarization"), summary_data.ipv4_exclude_summary, summary_data.ipv6_exclude_summary); } + _metrics->set_summary_data(std::move(summary_data)); + } else if (config_exists("subnets_for_summarization")) { + summary_data.type = IpSummary::BySubnet; parse_host_specs(config_get("subnets_for_summarization"), summary_data.ipv4_summary, summary_data.ipv6_summary); - // check ipv4 wildcard - auto it_v4_remove = summary_data.ipv4_summary.end(); - for (auto it = summary_data.ipv4_summary.begin(); it != summary_data.ipv4_summary.end(); it++) { - if (!it->addr.s_addr) { - if (summary_data.ipv4_wildcard.has_value()) { - throw StreamHandlerException("FlowHandler: 'subnets_for_summarization' only allows one ipv4 and one ipv6 wildcard"); - } - summary_data.ipv4_wildcard = *it; - it_v4_remove = it; - } - } - if (it_v4_remove != summary_data.ipv4_summary.end()) { - summary_data.ipv4_summary.erase(it_v4_remove); + if (config_exists("exclude_ips_from_summarization")) { + 
parse_host_specs(config_get("exclude_ips_from_summarization"), summary_data.ipv4_exclude_summary, summary_data.ipv6_exclude_summary); } - // check ipv6 wildcard - auto it_v6_remove = summary_data.ipv6_summary.end(); - for (auto it = summary_data.ipv6_summary.begin(); it != summary_data.ipv6_summary.end(); it++) { - bool wildcard = true; - for (size_t i = 0; i < sizeof(it->addr.s6_addr); ++i) { - if (it->addr.s6_addr[i]) { - wildcard = false; - break; - } - } - if (wildcard) { - if (summary_data.ipv6_wildcard.has_value()) { - throw StreamHandlerException("FlowHandler: 'subnets_for_summarization' only allows one ipv4 and one ipv6 wildcard"); - } - summary_data.ipv6_wildcard = *it; - it_v6_remove = it; - } - } - if (it_v6_remove != summary_data.ipv6_summary.end()) { - summary_data.ipv6_summary.erase(it_v6_remove); - } - } - - if (summary_data.type != IpSummary::None) { _metrics->set_summary_data(std::move(summary_data)); } @@ -233,20 +170,6 @@ void FlowStreamHandler::start() _f_enabled.set(Filters::OnlyPorts); } - if (config_exists("only_directions")) { - _f_enabled.set(Filters::DisableIn); - _f_enabled.set(Filters::DisableOut); - for (const auto &dir : config_get("only_directions")) { - if (dir == "in") { - _f_enabled.reset(Filters::DisableIn); - } else if (dir == "out") { - _f_enabled.reset(Filters::DisableOut); - } else { - throw ConfigException(fmt::format("FlowStreamHandler: only_directions filter contained an invalid/unsupported direction: {}", dir)); - } - } - } - if (config_exists("geoloc_notfound") && config_get("geoloc_notfound")) { _f_enabled.set(Filters::GeoLocNotFound); } @@ -357,8 +280,6 @@ void FlowStreamHandler::process_sflow_cb(const SFSample &payload, [[maybe_unused flow.payload_size = sample.sampledPacketSize; } - flow.tos = static_cast(sample.dcd_ipTos); - flow.src_port = sample.dcd_sport; flow.dst_port = sample.dcd_dport; flow.if_in_index = sample.inputPort; @@ -434,7 +355,6 @@ void FlowStreamHandler::process_netflow_cb(const std::string 
&senderIP, const NF flow.packets = sample.flow_packets; flow.payload_size = sample.flow_octets; - flow.tos = sample.tos; flow.src_port = sample.src_port; flow.dst_port = sample.dst_port; @@ -442,11 +362,11 @@ void FlowStreamHandler::process_netflow_cb(const std::string &senderIP, const NF flow.if_in_index = sample.if_index_in; if (sample.is_ipv6) { - flow.ipv6_in = pcpp::IPv6Address(sample.src_ip.data()); - flow.ipv6_out = pcpp::IPv6Address(sample.dst_ip.data()); + flow.ipv6_in = pcpp::IPv6Address(reinterpret_cast(sample.src_ip)); + flow.ipv6_out = pcpp::IPv6Address(reinterpret_cast(sample.dst_ip)); } else { - flow.ipv4_in = pcpp::IPv4Address(sample.src_ip.data()); - flow.ipv4_out = pcpp::IPv4Address(sample.dst_ip.data()); + flow.ipv4_in = pcpp::IPv4Address(sample.src_ip); + flow.ipv4_out = pcpp::IPv4Address(sample.dst_ip); } if (!_filtering(flow, packet.device_id)) { @@ -462,9 +382,9 @@ void FlowStreamHandler::process_netflow_cb(const std::string &senderIP, const NF bool FlowStreamHandler::_filtering(FlowData &flow, const std::string &device_id) { if (_f_enabled[Filters::OnlyIps]) { - if (flow.is_ipv6 && !match_subnet(_only_ipv6_list, flow.ipv6_in.toBytes()).has_value() && !match_subnet(_only_ipv6_list, flow.ipv6_out.toBytes()).has_value()) { + if (flow.is_ipv6 && !match_subnet(_only_ipv6_list, flow.ipv6_in.toBytes()).first && !match_subnet(_only_ipv6_list, flow.ipv6_out.toBytes()).first) { return true; - } else if (!match_subnet(_only_ipv4_list, flow.ipv4_in.toInt()).has_value() && !match_subnet(_only_ipv4_list, flow.ipv4_out.toInt()).has_value()) { + } else if (!match_subnet(_only_ipv4_list, flow.ipv4_in.toInt()).first && !match_subnet(_only_ipv4_list, flow.ipv4_out.toInt()).first) { return true; } } @@ -474,9 +394,8 @@ bool FlowStreamHandler::_filtering(FlowData &flow, const std::string &device_id) })) { return true; } - - static constexpr uint8_t DEF_NO_MATCH = 2; if (_f_enabled[Filters::OnlyDeviceInterfaces]) { + static constexpr uint8_t DEF_NO_MATCH = 2; 
uint8_t no_match{0}; if (std::none_of(_device_interfaces_list[device_id].begin(), _device_interfaces_list[device_id].end(), [flow](auto pair) { return (flow.if_in_index >= pair.first && flow.if_in_index <= pair.second); @@ -495,22 +414,6 @@ bool FlowStreamHandler::_filtering(FlowData &flow, const std::string &device_id) } } - { - uint8_t no_match{0}; - if (_f_enabled[Filters::DisableIn]) { - flow.if_in_index.reset(); - ++no_match; - } - - if (_f_enabled[Filters::DisableOut]) { - flow.if_out_index.reset(); - ++no_match; - } - if (no_match == DEF_NO_MATCH) { - return true; - } - } - if (_f_enabled[Filters::GeoLocNotFound] && HandlerModulePlugin::city->enabled()) { if (!flow.is_ipv6) { sockaddr_in sa4{}; @@ -685,12 +588,8 @@ void FlowMetricsBucket::specialized_merge(const AbstractMetricsBucket &o, [[mayb top_dir.second.topDstPort.merge(interface.second->directionTopN.at(top_dir.first).topDstPort); } if (group_enabled(group::FlowMetrics::TopIPPorts)) { - top_dir.second.topSrcIPPort.merge(interface.second->directionTopN.at(top_dir.first).topSrcIPPort); - top_dir.second.topDstIPPort.merge(interface.second->directionTopN.at(top_dir.first).topDstIPPort); - } - if (group_enabled(group::FlowMetrics::TopTos)) { - top_dir.second.topDSCP.merge(interface.second->directionTopN.at(top_dir.first).topDSCP); - top_dir.second.topECN.merge(interface.second->directionTopN.at(top_dir.first).topECN); + top_dir.second.topSrcIPandPort.merge(interface.second->directionTopN.at(top_dir.first).topSrcIPandPort); + top_dir.second.topDstIPandPort.merge(interface.second->directionTopN.at(top_dir.first).topDstIPandPort); } } @@ -721,6 +620,11 @@ void FlowMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap a { std::shared_lock r_lock(_mutex); + SummaryData *summary{nullptr}; + if (_summary_data && _summary_data->type != IpSummary::None) { + summary = _summary_data; + } + for (const auto &device : _devices_metrics) { auto device_labels = add_labels; auto deviceId = device.first; @@ 
-821,32 +725,34 @@ void FlowMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap a continue; } if (group_enabled(group::FlowMetrics::TopIPs)) { - top_dir.second.topSrcIP.to_prometheus(out, interface_labels); - top_dir.second.topDstIP.to_prometheus(out, interface_labels); + if (summary) { + top_dir.second.topSrcIP.to_prometheus( + out, interface_labels, [summary](const std::string &val) { + return ip_summarization(val, summary); + }, + Metric::Aggregate::SUMMARY); + } else { + top_dir.second.topSrcIP.to_prometheus(out, interface_labels); + } + if (summary) { + top_dir.second.topDstIP.to_prometheus( + out, interface_labels, [summary](const std::string &val) { + return ip_summarization(val, summary); + }, + Metric::Aggregate::SUMMARY); + } else { + top_dir.second.topDstIP.to_prometheus(out, interface_labels); + } } if (group_enabled(group::FlowMetrics::TopPorts)) { - top_dir.second.topSrcPort.to_prometheus(out, interface_labels); - top_dir.second.topDstPort.to_prometheus(out, interface_labels); + top_dir.second.topSrcPort.to_prometheus( + out, interface_labels, [](const network::IpPort &val) { return val.get_service(); }, Metric::Aggregate::SUMMARY); + top_dir.second.topDstPort.to_prometheus( + out, interface_labels, [](const network::IpPort &val) { return val.get_service(); }, Metric::Aggregate::SUMMARY); } if (group_enabled(group::FlowMetrics::TopIPPorts)) { - top_dir.second.topSrcIPPort.to_prometheus(out, interface_labels); - top_dir.second.topDstIPPort.to_prometheus(out, interface_labels); - } - if (group_enabled(group::FlowMetrics::TopTos)) { - top_dir.second.topDSCP.to_prometheus(out, interface_labels, [](const uint8_t &val) { - if (DscpNames.find(val) != DscpNames.end()) { - return DscpNames[val]; - } else { - return std::to_string(val); - } - }); - top_dir.second.topECN.to_prometheus(out, interface_labels, [](const uint8_t &val) { - if (EcnNames.find(val) != EcnNames.end()) { - return EcnNames[val]; - } else { - return std::to_string(val); - } 
- }); + top_dir.second.topSrcIPandPort.to_prometheus(out, interface_labels); + top_dir.second.topDstIPandPort.to_prometheus(out, interface_labels); } } @@ -885,177 +791,14 @@ void FlowMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap a } } -void FlowMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels) const +void FlowMetricsBucket::to_json(json &j) const { std::shared_lock r_lock(_mutex); - for (const auto &device : _devices_metrics) { - auto device_labels = add_labels; - auto deviceId = device.first; - DeviceEnrich *dev{nullptr}; - if (_enrich_data) { - if (auto it = _enrich_data->find(deviceId); it != _enrich_data->end()) { - dev = &it->second; - deviceId = it->second.name; - } - } - device_labels["device"] = deviceId; - - if (group_enabled(group::FlowMetrics::Counters)) { - device.second->total.to_opentelemetry(scope, start_ts, end_ts, device_labels); - device.second->filtered.to_opentelemetry(scope, start_ts, end_ts, device_labels); - } - - if (group_enabled(group::FlowMetrics::ByBytes) && group_enabled(group::FlowMetrics::TopInterfaces)) { - device.second->topInIfIndexBytes.to_opentelemetry(scope, start_ts, end_ts, device_labels, [dev](const uint32_t &val) { - if (dev) { - if (auto it = dev->interfaces.find(val); it != dev->interfaces.end()) { - return it->second.name; - } - } - return std::to_string(val); - }); - device.second->topOutIfIndexBytes.to_opentelemetry(scope, start_ts, end_ts, device_labels, [dev](const uint32_t &val) { - if (dev) { - if (auto it = dev->interfaces.find(val); it != dev->interfaces.end()) { - return it->second.name; - } - } - return std::to_string(val); - }); - } - - if (group_enabled(group::FlowMetrics::ByPackets) && group_enabled(group::FlowMetrics::TopInterfaces)) { - device.second->topInIfIndexPackets.to_opentelemetry(scope, start_ts, end_ts, device_labels, [dev](const uint32_t &val) { - if (dev) { - if (auto it = 
dev->interfaces.find(val); it != dev->interfaces.end()) { - return it->second.name; - } - } - return std::to_string(val); - }); - device.second->topOutIfIndexPackets.to_opentelemetry(scope, start_ts, end_ts, device_labels, [dev](const uint32_t &val) { - if (dev) { - if (auto it = dev->interfaces.find(val); it != dev->interfaces.end()) { - return it->second.name; - } - } - return std::to_string(val); - }); - } - - for (const auto &interface : device.second->interfaces) { - auto interface_labels = device_labels; - std::string interfaceId = std::to_string(interface.first); - if (dev) { - if (auto it = dev->interfaces.find(interface.first); it != dev->interfaces.end()) { - interfaceId = it->second.name; - } - } - interface_labels["device_interface"] = deviceId + "|" + interfaceId; - - if (group_enabled(group::FlowMetrics::Cardinality)) { - if (group_enabled(group::FlowMetrics::Conversations)) { - interface.second->conversationsCard.to_opentelemetry(scope, start_ts, end_ts, device_labels); - } - interface.second->srcIPCard.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - interface.second->dstIPCard.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - interface.second->srcPortCard.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - interface.second->dstPortCard.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - } - - for (auto &count_dir : interface.second->counters) { - if ((count_dir.first == InBytes || count_dir.first == OutBytes) && !group_enabled(group::FlowMetrics::ByBytes)) { - continue; - } - if ((count_dir.first == InPackets || count_dir.first == OutPackets) && !group_enabled(group::FlowMetrics::ByPackets)) { - continue; - } - if (group_enabled(group::FlowMetrics::Counters)) { - count_dir.second.UDP.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - count_dir.second.TCP.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - count_dir.second.OtherL4.to_opentelemetry(scope, start_ts, end_ts, 
interface_labels); - count_dir.second.IPv4.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - count_dir.second.IPv6.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - count_dir.second.total.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - } - } - - for (auto &top_dir : interface.second->directionTopN) { - if ((top_dir.first == InBytes || top_dir.first == OutBytes) && !group_enabled(group::FlowMetrics::ByBytes)) { - continue; - } - if ((top_dir.first == InPackets || top_dir.first == OutPackets) && !group_enabled(group::FlowMetrics::ByPackets)) { - continue; - } - if (group_enabled(group::FlowMetrics::TopIPs)) { - top_dir.second.topSrcIP.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - top_dir.second.topDstIP.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - } - if (group_enabled(group::FlowMetrics::TopPorts)) { - top_dir.second.topSrcPort.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - top_dir.second.topDstPort.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - } - if (group_enabled(group::FlowMetrics::TopIPPorts)) { - top_dir.second.topSrcIPPort.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - top_dir.second.topDstIPPort.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - } - if (group_enabled(group::FlowMetrics::TopTos)) { - top_dir.second.topDSCP.to_opentelemetry(scope, start_ts, end_ts, interface_labels, [](const uint8_t &val) { - if (DscpNames.find(val) != DscpNames.end()) { - return DscpNames[val]; - } else { - return std::to_string(val); - } - }); - top_dir.second.topECN.to_opentelemetry(scope, start_ts, end_ts, interface_labels, [](const uint8_t &val) { - if (EcnNames.find(val) != EcnNames.end()) { - return EcnNames[val]; - } else { - return std::to_string(val); - } - }); - } - } - - if (group_enabled(group::FlowMetrics::ByBytes)) { - if (group_enabled(group::FlowMetrics::TopGeo)) { - 
interface.second->topN.first.topGeoLoc.to_opentelemetry(scope, start_ts, end_ts, interface_labels, [](Metric::LabelMap &l, const std::string &key, const visor::geo::City &val) { - l[key] = val.location; - if (!val.latitude.empty() && !val.longitude.empty()) { - l["lat"] = val.latitude; - l["lon"] = val.longitude; - } - }); - interface.second->topN.first.topASN.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - } - if (group_enabled(group::FlowMetrics::Conversations)) { - interface.second->topN.first.topConversations.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - } - } - - if (group_enabled(group::FlowMetrics::ByPackets)) { - if (group_enabled(group::FlowMetrics::TopGeo)) { - interface.second->topN.second.topGeoLoc.to_opentelemetry(scope, start_ts, end_ts, interface_labels, [](Metric::LabelMap &l, const std::string &key, const visor::geo::City &val) { - l[key] = val.location; - if (!val.latitude.empty() && !val.longitude.empty()) { - l["lat"] = val.latitude; - l["lon"] = val.longitude; - } - }); - interface.second->topN.second.topASN.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - } - if (group_enabled(group::FlowMetrics::Conversations)) { - interface.second->topN.second.topConversations.to_opentelemetry(scope, start_ts, end_ts, interface_labels); - } - } - } + SummaryData *summary{nullptr}; + if (_summary_data && _summary_data->type != IpSummary::None) { + summary = _summary_data; } -} - -void FlowMetricsBucket::to_json(json &j) const -{ - std::shared_lock r_lock(_mutex); for (const auto &device : _devices_metrics) { auto deviceId = device.first; @@ -1154,32 +897,30 @@ void FlowMetricsBucket::to_json(json &j) const continue; } if (group_enabled(group::FlowMetrics::TopIPs)) { - top_dir.second.topSrcIP.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); - top_dir.second.topDstIP.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); + if (summary) { + 
top_dir.second.topSrcIP.to_json(j["devices"][deviceId]["interfaces"][interfaceId], [summary](const std::string &val) { + return ip_summarization(val, summary); + }); + } else { + top_dir.second.topSrcIP.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); + } + if (summary) { + top_dir.second.topDstIP.to_json(j["devices"][deviceId]["interfaces"][interfaceId], [summary](const std::string &val) { + return ip_summarization(val, summary); + }); + } else { + top_dir.second.topDstIP.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); + } } if (group_enabled(group::FlowMetrics::TopPorts)) { - top_dir.second.topSrcPort.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); - top_dir.second.topDstPort.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); + top_dir.second.topSrcPort.to_json( + j["devices"][deviceId]["interfaces"][interfaceId], [](const network::IpPort &val) { return val.get_service(); }, Metric::Aggregate::SUMMARY); + top_dir.second.topDstPort.to_json( + j["devices"][deviceId]["interfaces"][interfaceId], [](const network::IpPort &val) { return val.get_service(); }, Metric::Aggregate::SUMMARY); } if (group_enabled(group::FlowMetrics::TopIPPorts)) { - top_dir.second.topSrcIPPort.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); - top_dir.second.topDstIPPort.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); - } - if (group_enabled(group::FlowMetrics::TopTos)) { - top_dir.second.topDSCP.to_json(j["devices"][deviceId]["interfaces"][interfaceId], [](const uint8_t &val) { - if (DscpNames.find(val) != DscpNames.end()) { - return DscpNames[val]; - } else { - return std::to_string(val); - } - }); - top_dir.second.topECN.to_json(j["devices"][deviceId]["interfaces"][interfaceId], [](const uint8_t &val) { - if (EcnNames.find(val) != EcnNames.end()) { - return EcnNames[val]; - } else { - return std::to_string(val); - } - }); + top_dir.second.topSrcIPandPort.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); + 
top_dir.second.topDstIPandPort.to_json(j["devices"][deviceId]["interfaces"][interfaceId]); } } @@ -1218,7 +959,7 @@ void FlowMetricsBucket::to_json(json &j) const } } -void FlowMetricsBucket::process_flow(bool deep, const FlowPacket &payload, FlowCache &cache) +void FlowMetricsBucket::process_flow(bool deep, const FlowPacket &payload) { std::unique_lock lock(_mutex); @@ -1244,13 +985,13 @@ void FlowMetricsBucket::process_flow(bool deep, const FlowPacket &payload, FlowC } if (group_enabled(group::FlowMetrics::ByBytes)) { - process_interface(deep, device_flow->interfaces[flow.if_in_index.value()].get(), flow, cache, InBytes); + process_interface(deep, device_flow->interfaces[flow.if_in_index.value()].get(), flow, InBytes); if (deep && group_enabled(group::FlowMetrics::TopInterfaces)) { device_flow->topInIfIndexBytes.update(flow.if_in_index.value(), flow.payload_size); } } if (group_enabled(group::FlowMetrics::ByPackets)) { - process_interface(deep, device_flow->interfaces[flow.if_in_index.value()].get(), flow, cache, InPackets); + process_interface(deep, device_flow->interfaces[flow.if_in_index.value()].get(), flow, InPackets); if (deep && group_enabled(group::FlowMetrics::TopInterfaces)) { device_flow->topInIfIndexPackets.update(flow.if_in_index.value(), flow.packets); } @@ -1263,22 +1004,22 @@ void FlowMetricsBucket::process_flow(bool deep, const FlowPacket &payload, FlowC device_flow->interfaces[flow.if_out_index.value()]->set_topn_settings(_topn_count, _topn_percentile_threshold); } if (group_enabled(group::FlowMetrics::ByBytes)) { - process_interface(deep, device_flow->interfaces[flow.if_out_index.value()].get(), flow, cache, OutBytes); + process_interface(deep, device_flow->interfaces[flow.if_out_index.value()].get(), flow, OutBytes); if (deep && group_enabled(group::FlowMetrics::TopInterfaces)) { device_flow->topOutIfIndexBytes.update(flow.if_out_index.value(), flow.payload_size); } } if (group_enabled(group::FlowMetrics::ByPackets)) { - 
process_interface(deep, device_flow->interfaces[flow.if_out_index.value()].get(), flow, cache, OutPackets); + process_interface(deep, device_flow->interfaces[flow.if_out_index.value()].get(), flow, OutPackets); + if (deep && group_enabled(group::FlowMetrics::TopInterfaces)) { - device_flow->topOutIfIndexPackets.update(flow.if_out_index.value(), flow.packets); + device_flow->topOutIfIndexPackets.update(flow.if_out_index.value(), flow.packets); } } } } } -void FlowMetricsBucket::process_interface(bool deep, FlowInterface *iface, const FlowData &flow, FlowCache &cache, FlowDirectionType type) +void FlowMetricsBucket::process_interface(bool deep, FlowInterface *iface, const FlowData &flow, FlowDirectionType type) { uint64_t aggregator{0}; switch (type) { @@ -1323,27 +1064,12 @@ void FlowMetricsBucket::process_interface(bool deep, FlowInterface *iface, const proto = network::Protocol::UDP; } - std::string src_port{std::to_string(flow.src_port)}; - std::string dst_port{std::to_string(flow.dst_port)}; - if (group_enabled(group::FlowMetrics::TopPorts)) { if (flow.src_port > 0) { - if (auto port = cache.lru_port_list.getValue({flow.src_port, proto}); port.has_value()) { - src_port = port.value(); - } else { - src_port = network::IpPort::get_service(flow.src_port, proto); - cache.lru_port_list.put({flow.src_port, proto}, src_port); - } - iface->directionTopN.at(type).topSrcPort.update(src_port, aggregator); + iface->directionTopN.at(type).topSrcPort.update(network::IpPort{flow.src_port, proto}, aggregator); } if (flow.dst_port > 0) { - if (auto port = cache.lru_port_list.getValue({flow.dst_port, proto}); port.has_value()) { - dst_port = port.value(); - } else { - dst_port = network::IpPort::get_service(flow.dst_port, proto); - cache.lru_port_list.put({flow.dst_port, proto}, dst_port); - } - iface->directionTopN.at(type).topDstPort.update(dst_port, aggregator); + iface->directionTopN.at(type).topDstPort.update(network::IpPort{flow.dst_port, proto}, aggregator); } } @@ -1355,79 
+1081,53 @@ void FlowMetricsBucket::process_interface(bool deep, FlowInterface *iface, const iface->dstPortCard.update(flow.dst_port); } } - if (group_enabled(group::FlowMetrics::TopTos)) { - iface->directionTopN.at(type).topDSCP.update((flow.tos >> DSCP_SHIFT), aggregator); - iface->directionTopN.at(type).topECN.update((flow.tos & ECN_MASK), aggregator); - } std::string application_src; std::string application_dst; - std::string ip; + if (!flow.is_ipv6 && flow.ipv4_in.isValid()) { group_enabled(group::FlowMetrics::Cardinality) ? iface->srcIPCard.update(flow.ipv4_in.toInt()) : void(); - if (auto ipv4 = cache.lru_ipv4_list.getValue(flow.ipv4_in.toInt()); ipv4.has_value()) { - ip = ipv4.value(); - } else { - ip = ip_summarization(flow.ipv4_in, _summary_data); - cache.lru_ipv4_list.put(flow.ipv4_in.toInt(), ip); - } - application_src = ip + ":" + src_port; + auto ip = flow.ipv4_in.toString(); + application_src = ip + ":" + std::to_string(flow.src_port); if (group_enabled(group::FlowMetrics::TopIPs)) { iface->directionTopN.at(type).topSrcIP.update(ip, aggregator); } if ((flow.src_port > 0) && group_enabled(group::FlowMetrics::TopIPPorts)) { - iface->directionTopN.at(type).topSrcIPPort.update(application_src, aggregator); + iface->directionTopN.at(type).topSrcIPandPort.update(application_src, aggregator); } _process_geo_metrics(iface, type, flow.ipv4_in, aggregator); } else if (flow.is_ipv6 && flow.ipv6_in.isValid()) { group_enabled(group::FlowMetrics::Cardinality) ? 
iface->srcIPCard.update(reinterpret_cast(flow.ipv6_in.toBytes()), 16) : void(); - auto ipv6_str = flow.ipv6_in.toString(); - if (auto ipv6 = cache.lru_ipv6_list.getValue(ipv6_str); ipv6.has_value()) { - ip = ipv6.value(); - } else { - ip = ip_summarization(flow.ipv6_in, _summary_data); - cache.lru_ipv6_list.put(ipv6_str, ip); - } - application_src = ip + ":" + src_port; + auto ip = flow.ipv6_in.toString(); + application_src = ip + ":" + std::to_string(flow.src_port); if (group_enabled(group::FlowMetrics::TopIPs)) { iface->directionTopN.at(type).topSrcIP.update(ip, aggregator); } if ((flow.src_port > 0) && group_enabled(group::FlowMetrics::TopIPPorts)) { - iface->directionTopN.at(type).topSrcIPPort.update(application_src, aggregator); + iface->directionTopN.at(type).topSrcIPandPort.update(application_src, aggregator); } _process_geo_metrics(iface, type, flow.ipv6_in, aggregator); } if (!flow.is_ipv6 && flow.ipv4_out.isValid()) { group_enabled(group::FlowMetrics::Cardinality) ? iface->dstIPCard.update(flow.ipv4_out.toInt()) : void(); - if (auto ipv4 = cache.lru_ipv4_list.getValue(flow.ipv4_out.toInt()); ipv4.has_value()) { - ip = ipv4.value(); - } else { - ip = ip_summarization(flow.ipv4_out, _summary_data); - cache.lru_ipv4_list.put(flow.ipv4_out.toInt(), ip); - } - application_dst = ip + ":" + dst_port; + auto ip = flow.ipv4_out.toString(); + application_dst = ip + ":" + std::to_string(flow.dst_port); if (group_enabled(group::FlowMetrics::TopIPs)) { iface->directionTopN.at(type).topDstIP.update(ip, aggregator); } if ((flow.dst_port > 0) && group_enabled(group::FlowMetrics::TopIPPorts)) { - iface->directionTopN.at(type).topDstIPPort.update(application_dst, aggregator); + iface->directionTopN.at(type).topDstIPandPort.update(application_dst, aggregator); } _process_geo_metrics(iface, type, flow.ipv4_out, aggregator); } else if (flow.is_ipv6 && flow.ipv6_out.isValid()) { group_enabled(group::FlowMetrics::Cardinality) ? 
iface->dstIPCard.update(reinterpret_cast(flow.ipv6_out.toBytes()), 16) : void(); - auto ipv6_str = flow.ipv6_out.toString(); - if (auto ipv6 = cache.lru_ipv6_list.getValue(ipv6_str); ipv6.has_value()) { - ip = ipv6.value(); - } else { - ip = ip_summarization(flow.ipv6_out, _summary_data); - cache.lru_ipv6_list.put(ipv6_str, ip); - } - application_dst = ip + ":" + dst_port; + auto ip = flow.ipv6_out.toString(); + application_dst = ip + ":" + std::to_string(flow.dst_port); if (group_enabled(group::FlowMetrics::TopIPs)) { iface->directionTopN.at(type).topDstIP.update(ip, aggregator); } if ((flow.dst_port > 0) && group_enabled(group::FlowMetrics::TopIPPorts)) { - iface->directionTopN.at(type).topDstIPPort.update(application_dst, aggregator); + iface->directionTopN.at(type).topDstIPandPort.update(application_dst, aggregator); } _process_geo_metrics(iface, type, flow.ipv6_out, aggregator); } @@ -1500,6 +1200,6 @@ void FlowMetricsManager::process_flow(const FlowPacket &payload) { new_event(payload.stamp); // process in the "live" bucket - live_bucket()->process_flow(_deep_sampling_now, payload, _cache); + live_bucket()->process_flow(_deep_sampling_now, payload); } } diff --git a/src/handlers/flow/FlowStreamHandler.h b/src/handlers/flow/FlowStreamHandler.h index 7c8733102..375080c1a 100644 --- a/src/handlers/flow/FlowStreamHandler.h +++ b/src/handlers/flow/FlowStreamHandler.h @@ -10,7 +10,6 @@ #include "IpPort.h" #include "MockInputStream.h" #include "StreamHandler.h" -#include "VisorLRUList.h" #include "utils.h" #include #include @@ -32,7 +31,6 @@ enum FlowMetrics : visor::MetricGroupIntType { TopPorts, TopIPs, TopIPPorts, - TopTos, TopGeo, TopInterfaces }; @@ -64,14 +62,10 @@ struct DeviceEnrich { struct SummaryData { IpSummary type{IpSummary::None}; - bool exclude_unknown_asns{false}; lib::utils::IPv4subnetList ipv4_exclude_summary; lib::utils::IPv6subnetList ipv6_exclude_summary; - std::vector asn_exclude_summary; lib::utils::IPv4subnetList ipv4_summary; 
lib::utils::IPv6subnetList ipv6_summary; - std::optional ipv4_wildcard; - std::optional ipv6_wildcard; }; typedef std::unordered_map EnrichData; @@ -81,7 +75,6 @@ struct FlowData { IP_PROTOCOL l4; size_t payload_size; uint32_t packets; - uint8_t tos; pcpp::IPv4Address ipv4_in; pcpp::IPv4Address ipv4_out; pcpp::IPv6Address ipv6_in; @@ -106,21 +99,15 @@ struct FlowPacket { } }; -struct FlowCache { - LRUList lru_port_list{2000}; - LRUList lru_ipv4_list{1000}; - LRUList lru_ipv6_list{1000}; -}; - struct FlowTopN { TopN topConversations; TopN topGeoLoc; TopN topASN; FlowTopN(std::string metric) - : topConversations(FLOW_SCHEMA, "conversation", {"top_conversations_" + metric}, "Top source IP addresses and port by " + metric) + : topConversations(FLOW_SCHEMA, "conversations", {"top_conversations_" + metric}, "Top source IP addresses and port by " + metric) , topGeoLoc(FLOW_SCHEMA, "geo_loc", {"top_geo_loc_" + metric}, "Top GeoIP locations by " + metric) - , topASN(FLOW_SCHEMA, "asn", {"top_asn_" + metric}, "Top ASNs by IP by " + metric) + , topASN(FLOW_SCHEMA, "asn", {"top_ASN_" + metric}, "Top ASNs by IP by " + metric) { } @@ -135,22 +122,18 @@ struct FlowTopN { struct FlowDirectionTopN { TopN topSrcIP; TopN topDstIP; - TopN topSrcPort; - TopN topDstPort; - TopN topSrcIPPort; - TopN topDstIPPort; - TopN topDSCP; - TopN topECN; + TopN topSrcPort; + TopN topDstPort; + TopN topSrcIPandPort; + TopN topDstIPandPort; FlowDirectionTopN(std::string direction, std::string metric) : topSrcIP(FLOW_SCHEMA, "ip", {"top_" + direction + "_src_ips_" + metric}, "Top " + direction + " source IP addresses by " + metric) , topDstIP(FLOW_SCHEMA, "ip", {"top_" + direction + "_dst_ips_" + metric}, "Top " + direction + " destination IP addresses by " + metric) , topSrcPort(FLOW_SCHEMA, "port", {"top_" + direction + "_src_ports_" + metric}, "Top " + direction + " source ports by " + metric) , topDstPort(FLOW_SCHEMA, "port", {"top_" + direction + "_dst_ports_" + metric}, "Top " + direction + " 
destination ports by " + metric) - , topSrcIPPort(FLOW_SCHEMA, "ip_port", {"top_" + direction + "_src_ip_ports_" + metric}, "Top " + direction + " source IP addresses and port by " + metric) - , topDstIPPort(FLOW_SCHEMA, "ip_port", {"top_" + direction + "_dst_ip_ports_" + metric}, "Top " + direction + " destination IP addresses and port by " + metric) - , topDSCP(FLOW_SCHEMA, "dscp", {"top_" + direction + "_dscp_" + metric}, "Top " + direction + " IP DSCP by " + metric) - , topECN(FLOW_SCHEMA, "ecn", {"top_" + direction + "_ecn_" + metric}, "Top " + direction + " IP ECN by " + metric) + , topSrcIPandPort(FLOW_SCHEMA, "ip_port", {"top_" + direction + "_src_ips_and_port_" + metric}, "Top " + direction + " source IP addresses and port by " + metric) + , topDstIPandPort(FLOW_SCHEMA, "ip_port", {"top_" + direction + "_dst_ips_and_port_" + metric}, "Top " + direction + " destination IP addresses and port by " + metric) { } @@ -160,8 +143,8 @@ struct FlowDirectionTopN { topDstIP.set_settings(topn_count, percentile_threshold); topSrcPort.set_settings(topn_count, percentile_threshold); topDstPort.set_settings(topn_count, percentile_threshold); - topSrcIPPort.set_settings(topn_count, percentile_threshold); - topDstIPPort.set_settings(topn_count, percentile_threshold); + topSrcIPandPort.set_settings(topn_count, percentile_threshold); + topDstIPandPort.set_settings(topn_count, percentile_threshold); } }; @@ -275,7 +258,6 @@ class FlowMetricsBucket final : public visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const override; void update_topn_metrics(size_t topn_count, uint64_t percentile_threshold) override { _topn_count = topn_count; @@ 
-301,15 +283,14 @@ class FlowMetricsBucket final : public visor::AbstractMetricsBucket } _devices_metrics[device]->filtered += filtered; } - void process_flow(bool deep, const FlowPacket &payload, FlowCache &cache); - void process_interface(bool deep, FlowInterface *iface, const FlowData &flow, FlowCache &cache, FlowDirectionType type); + void process_flow(bool deep, const FlowPacket &payload); + void process_interface(bool deep, FlowInterface *iface, const FlowData &flow, FlowDirectionType type); }; class FlowMetricsManager final : public visor::AbstractMetricsManager { EnrichData _enrich_data; SummaryData _summary_data; - FlowCache _cache; public: FlowMetricsManager(const Configurable *window_config) @@ -344,9 +325,6 @@ class FlowMetricsManager final : public visor::AbstractMetricsManagerset_enrich_data(&_enrich_data); } - if (_summary_data.type != IpSummary::None) { - live_bucket()->set_summary_data(&_summary_data); - } } }; @@ -375,8 +353,6 @@ class FlowStreamHandler final : public visor::StreamMetricsHandler _f_enabled; @@ -387,13 +363,10 @@ class FlowStreamHandler final : public visor::StreamMetricsHandler -namespace visor::handler::flow { - -static constexpr uint8_t DSCP_SHIFT = 0x2; -static constexpr uint8_t ECN_MASK = 0x3; - -static std::unordered_map DscpNames({ - {0, "CS0"}, - {8, "CS1"}, - {16, "CS2"}, - {24, "CS3"}, - {32, "CS4"}, - {40, "CS5"}, - {48, "CS6"}, - {56, "CS7"}, - {10, "AF11"}, - {12, "AF12"}, - {14, "AF13"}, - {18, "AF21"}, - {20, "AF22"}, - {22, "AF23"}, - {26, "AF31"}, - {28, "AF32"}, - {30, "AF33"}, - {34, "AF41"}, - {36, "AF42"}, - {38, "AF43"}, - {46, "EF"}, - {43, "VOICE-ADMIT"}, -}); - -static std::unordered_map DscpNumbers({ - {"CS0", 0}, - {"CS1", 8}, - {"CS2", 16}, - {"CS3", 24}, - {"CS4", 32}, - {"CS5", 40}, - {"CS6", 48}, - {"CS7", 56}, - {"AF11", 10}, - {"AF12", 12}, - {"AF13", 14}, - {"AF21", 18}, - {"AF22", 20}, - {"AF23", 22}, - {"AF31", 26}, - {"AF32", 28}, - {"AF33", 30}, - {"AF41", 34}, - {"AF42", 36}, - {"AF43", 38}, - 
{"EF", 46}, - {"VOICE-ADMIT", 43}, -}); - -static std::unordered_map EcnNames({ - {0, "Not-ECT"}, - {1, "ECT(1)"}, - {2, "ECT(0)"}, - {3, "CE"}, -}); - -static std::unordered_map EcnNumbers({ - {"Not-ECT", 0}, - {"ECT(1)", 1}, - {"ECT(0)", 2}, - {"CE ", 3}, -}); - -} \ No newline at end of file diff --git a/src/handlers/flow/tests/test_flows.cpp b/src/handlers/flow/tests/test_flows.cpp index 2b689945e..ea0f0ae7d 100644 --- a/src/handlers/flow/tests/test_flows.cpp +++ b/src/handlers/flow/tests/test_flows.cpp @@ -17,7 +17,6 @@ TEST_CASE("Parse sflow stream", "[sflow][flow]") auto stream_proxy = stream.add_event_proxy(c); c.config_set("num_periods", 1); FlowStreamHandler flow_handler{"flow-test", stream_proxy, &c}; - flow_handler.config_set("enable", visor::Configurable::StringList({"top_tos"})); flow_handler.start(); stream.start(); @@ -44,12 +43,8 @@ TEST_CASE("Parse sflow stream", "[sflow][flow]") CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_dst_ports_bytes"][0]["estimate"] == 170879120000); CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_dst_ports_bytes"][0]["name"] == "commplex-link"); CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ports_bytes"][0]["name"] == "dynamic-client"); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ip_ports_bytes"][0]["estimate"] == 108027400000); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ip_ports_bytes"][0]["name"] == "10.4.1.2:dynamic-client"); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_dscp_bytes"][0]["estimate"] == 170879120000); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_dscp_bytes"][0]["name"] == "CS0"); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_ecn_packets"][0]["estimate"] == 112600000); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_ecn_packets"][0]["name"] == "ECT(0)"); + 
CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ips_and_port_bytes"][0]["estimate"] == 26838240000); + CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ips_and_port_bytes"][0]["name"] == "10.4.1.2:57420"); } TEST_CASE("Parse sflow with enrichment", "[sflow][flow]") @@ -118,7 +113,7 @@ TEST_CASE("Parse sflow stream without sampling", "[sflow][flow]") stream.config_set("flow_type", "sflow"); stream.config_set("pcap_file", "tests/fixtures/ecmp.pcap"); visor::network::IpPort::set_csv_iana_ports("tests/fixtures/pktvisor-port-service-names.csv"); - + visor::Config c; auto stream_proxy = stream.add_event_proxy(c); c.config_set("num_periods", 1); @@ -147,9 +142,9 @@ TEST_CASE("Parse sflow stream without sampling", "[sflow][flow]") CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ips_bytes"][0]["name"] == "10.4.1.2"); CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_src_ips_packets"][0]["estimate"] == 258); CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_src_ips_packets"][0]["name"] == "10.4.4.2"); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_dst_ports_bytes"][0]["estimate"] == 18060); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_src_ip_ports_bytes"][0]["estimate"] == 18060); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_src_ip_ports_bytes"][0]["name"] == "10.4.4.2:commplex-link"); + CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_dst_ports_bytes"][0]["estimate"] == 13230); + CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_src_ips_and_port_bytes"][0]["estimate"] == 18060); + CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_src_ips_and_port_bytes"][0]["name"] == "10.4.4.2:5001"); } TEST_CASE("Parse sflow stream with ip filter", "[sflow][flow]") @@ -188,8 +183,8 @@ TEST_CASE("Parse sflow stream with ip filter", "[sflow][flow]") 
CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_src_ips_packets"][0]["estimate"] == 5160000); CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_out_src_ips_packets"][0]["name"] == "10.4.4.2"); CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_dst_ports_bytes"][0]["estimate"] == 62851720000); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ip_ports_bytes"][0]["estimate"] == 62851720000); - CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ip_ports_bytes"][0]["name"] == "10.4.3.2:registered-40k"); + CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ips_and_port_bytes"][0]["estimate"] == 26443560000); + CHECK(j["devices"]["192.168.0.13"]["interfaces"]["52"]["top_in_src_ips_and_port_bytes"][0]["name"] == "10.4.3.2:40268"); } TEST_CASE("Parse sflow stream with device filter", "[sflow][flow]") @@ -230,8 +225,8 @@ TEST_CASE("Parse sflow stream with device filter", "[sflow][flow]") CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_packets"][0]["estimate"] == 8040000); CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_packets"][0]["name"] == "10.4.2.2"); CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_in_dst_ports_bytes"][0]["estimate"] == 264021720000); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ip_ports_bytes"][0]["estimate"] == 563840000); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ip_ports_bytes"][0]["name"] == "10.4.2.2:commplex-link"); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_and_port_bytes"][0]["estimate"] == 563840000); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_and_port_bytes"][0]["name"] == "10.4.2.2:5001"); CHECK(j["devices"]["192.168.0.13"]["records_filtered"] == 7189); } @@ -274,8 +269,8 @@ TEST_CASE("Parse sflow stream with port filter", "[sflow][flow]") 
CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_in_src_ips_packets"][0]["estimate"] == 16820000); CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_in_src_ips_packets"][0]["name"] == "10.4.3.2"); CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_in_dst_ports_bytes"][0]["estimate"] == 25532760000); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_out_src_ip_ports_bytes"][0]["estimate"] == 71400000); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_out_src_ip_ports_bytes"][0]["name"] == "10.4.4.2:commplex-link"); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_out_src_ips_and_port_bytes"][0]["estimate"] == 71400000); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_out_src_ips_and_port_bytes"][0]["name"] == "10.4.4.2:5001"); } TEST_CASE("Parse sflow stream with subnet summary", "[sflow][flow]") @@ -311,52 +306,6 @@ TEST_CASE("Parse sflow stream with subnet summary", "[sflow][flow]") CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_in_src_ips_bytes"][0]["name"] == "10.4.0.0/16"); } -TEST_CASE("Parse sflow stream with subnet summary wildcard", "[sflow][flow]") -{ - - FlowInputStream stream{"sflow-test"}; - stream.config_set("flow_type", "sflow"); - stream.config_set("pcap_file", "tests/fixtures/ecmp.pcap"); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - FlowStreamHandler flow_handler{"flow-test", stream_proxy, &c}; - flow_handler.config_set("subnets_for_summarization", {"0.0.0.0/16"}); - flow_handler.start(); - stream.start(); - stream.stop(); - flow_handler.stop(); - - auto event_data = flow_handler.metrics()->bucket(0)->event_data_locked(); - - // confirmed with wireshark - CHECK(event_data.num_events->value() == 9279); - CHECK(event_data.num_samples->value() == 9279); - - nlohmann::json j; - flow_handler.metrics()->bucket(0)->to_json(j); - - 
CHECK(j["devices"]["192.168.0.11"]["interfaces"]["4"]["top_in_src_ips_bytes"][0]["estimate"] == 738240000); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["4"]["top_in_src_ips_bytes"][0]["name"] == "10.4.0.0/16"); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_in_src_ips_bytes"][0]["estimate"] == 249921240000); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["38"]["top_in_src_ips_bytes"][0]["name"] == "10.4.0.0/16"); -} - -TEST_CASE("Flow handler error with multiple wildcards", "[sflow][flow]") -{ - FlowInputStream stream{"sflow-test"}; - stream.config_set("flow_type", "sflow"); - stream.config_set("pcap_file", "tests/fixtures/ecmp.pcap"); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - FlowStreamHandler flow_handler{"flow-test", stream_proxy, &c}; - flow_handler.config_set("subnets_for_summarization", {"0.0.0.0/16", "0.0.0.0/24"}); - REQUIRE_THROWS_WITH(flow_handler.start(), "FlowHandler: 'subnets_for_summarization' only allows one ipv4 and one ipv6 wildcard"); -} - TEST_CASE("Parse sflow stream with interfaces filter", "[sflow][flow]") { @@ -371,7 +320,6 @@ TEST_CASE("Parse sflow stream with interfaces filter", "[sflow][flow]") auto devices = std::make_shared(); devices->config_set("192.168.0.11", {"37", "4", "35-37"}); flow_handler.config_set>("only_device_interfaces", devices); - flow_handler.config_set("only_directions", {"in"}); flow_handler.start(); stream.start(); @@ -387,14 +335,16 @@ TEST_CASE("Parse sflow stream with interfaces filter", "[sflow][flow]") nlohmann::json j; flow_handler.metrics()->bucket(0)->to_json(j); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["cardinality"]["dst_ips_out"] == 1); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["cardinality"]["src_ips_in"] == 1); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["cardinality"]["dst_ports_out"] == 1); - 
CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["cardinality"]["src_ports_in"] == 15); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["cardinality"]["dst_ips_out"] == 2); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["cardinality"]["src_ips_in"] == 2); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["cardinality"]["dst_ports_out"] == 16); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["cardinality"]["src_ports_in"] == 16); CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_in_src_ips_bytes"][0]["estimate"] == 264021720000); CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_in_src_ips_bytes"][0]["name"] == "10.4.1.2"); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_packets"][0]["name"] == nullptr); - CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_and_port_bytes"][0]["estimate"] == nullptr); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_packets"][0]["estimate"] == 8040000); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_packets"][0]["name"] == "10.4.2.2"); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_and_port_bytes"][0]["estimate"] == 563840000); + CHECK(j["devices"]["192.168.0.11"]["interfaces"]["37"]["top_out_src_ips_and_port_bytes"][0]["name"] == "10.4.2.2:5001"); } TEST_CASE("Parse netflow stream", "[netflow][flow]") @@ -408,7 +358,6 @@ TEST_CASE("Parse netflow stream", "[netflow][flow]") auto stream_proxy = stream.add_event_proxy(c); c.config_set("num_periods", 1); FlowStreamHandler flow_handler{"flow-test", stream_proxy, &c}; - flow_handler.config_set("enable", visor::Configurable::StringList({"top_tos"})); flow_handler.start(); stream.start(); @@ -425,89 +374,12 @@ TEST_CASE("Parse netflow stream", "[netflow][flow]") flow_handler.metrics()->bucket(0)->to_json(j); CHECK(j["devices"]["192.168.100.1"]["records_flows"] == 24); - 
CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["cardinality"]["dst_ips_out"] == 24); - CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["cardinality"]["src_ips_in"] == 24); - CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["cardinality"]["dst_ports_out"] == 0); - CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["cardinality"]["src_ports_in"] == 0); - CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["top_in_src_ips_bytes"][0]["estimate"] == 6066232); - CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["top_in_src_ips_packets"][0]["estimate"] == 7858); - CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["top_in_dscp_bytes"][0]["estimate"] == 142139882); - CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["top_in_dscp_bytes"][0]["name"] == "CS0"); - CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["top_in_ecn_packets"][0]["estimate"] == 183920); - CHECK(j["devices"]["192.168.100.1"]["interfaces"]["800"]["top_in_ecn_packets"][0]["name"] == "Not-ECT"); -} - -TEST_CASE("Parse IPFIX stream", "[netflow][flow]") -{ - - FlowInputStream stream{"ipfix-test"}; - stream.config_set("flow_type", "netflow"); - stream.config_set("pcap_file", "tests/fixtures/ipfix.pcap"); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - FlowStreamHandler flow_handler{"flow-test", stream_proxy, &c}; - - flow_handler.start(); - stream.start(); - stream.stop(); - flow_handler.stop(); - - auto event_data = flow_handler.metrics()->bucket(0)->event_data_locked(); - - // confirmed with wireshark - CHECK(event_data.num_events->value() == 23); - CHECK(event_data.num_samples->value() == 23); - - nlohmann::json j; - flow_handler.metrics()->bucket(0)->to_json(j); - - CHECK(j["devices"]["192.168.100.2"]["records_flows"] == 23); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["cardinality"]["dst_ips_out"] == 1); - 
CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["cardinality"]["src_ips_in"] == 1); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["cardinality"]["dst_ports_out"] == 9); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["cardinality"]["src_ports_in"] == 16); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["top_out_src_ips_bytes"][0]["estimate"] == 120000); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["top_out_src_ips_bytes"][0]["name"] == "::1"); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["top_out_src_ips_packets"][0]["estimate"] == 1472); -} - -TEST_CASE("Parse IPFIX stream with subnet summary wildcard", "[netflow][flow]") -{ - - FlowInputStream stream{"ipfix-test"}; - stream.config_set("flow_type", "netflow"); - stream.config_set("pcap_file", "tests/fixtures/ipfix.pcap"); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - FlowStreamHandler flow_handler{"flow-test", stream_proxy, &c}; - flow_handler.config_set("subnets_for_summarization", {"0.0.0.0/16", "::0/24"}); - - flow_handler.start(); - stream.start(); - stream.stop(); - flow_handler.stop(); - - auto event_data = flow_handler.metrics()->bucket(0)->event_data_locked(); - - // confirmed with wireshark - CHECK(event_data.num_events->value() == 23); - CHECK(event_data.num_samples->value() == 23); - - nlohmann::json j; - flow_handler.metrics()->bucket(0)->to_json(j); - - CHECK(j["devices"]["192.168.100.2"]["records_flows"] == 23); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["cardinality"]["dst_ips_out"] == 1); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["cardinality"]["src_ips_in"] == 1); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["cardinality"]["dst_ports_out"] == 9); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["cardinality"]["src_ports_in"] == 16); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["top_out_src_ips_bytes"][0]["estimate"] == 
120000); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["top_out_src_ips_bytes"][0]["name"] == "::/24"); - CHECK(j["devices"]["192.168.100.2"]["interfaces"]["0"]["top_out_src_ips_packets"][0]["estimate"] == 1472); + CHECK(j["devices"]["192.168.100.1"]["interfaces"]["0"]["cardinality"]["dst_ips_out"] == 24); + CHECK(j["devices"]["192.168.100.1"]["interfaces"]["0"]["cardinality"]["src_ips_in"] == 24); + CHECK(j["devices"]["192.168.100.1"]["interfaces"]["0"]["cardinality"]["dst_ports_out"] == 0); + CHECK(j["devices"]["192.168.100.1"]["interfaces"]["0"]["cardinality"]["src_ports_in"] == 0); + CHECK(j["devices"]["192.168.100.1"]["interfaces"]["0"]["top_in_src_ips_bytes"][0]["estimate"] == 6066232); + CHECK(j["devices"]["192.168.100.1"]["interfaces"]["0"]["top_in_src_ips_packets"][0]["estimate"] == 7858); } TEST_CASE("Flow invalid config", "[flow][filter][config]") @@ -521,5 +393,5 @@ TEST_CASE("Flow invalid config", "[flow][filter][config]") c.config_set("num_periods", 1); FlowStreamHandler flow_handler{"flow-test", stream_proxy, &c}; flow_handler.config_set("invalid_config", true); - REQUIRE_THROWS_WITH(flow_handler.start(), "invalid_config is an invalid/unsupported config or filter. The valid configs/filters are: device_map, enrichment, only_device_interfaces, only_ips, only_ports, only_directions, geoloc_notfound, asn_notfound, summarize_ips_by_asn, subnets_for_summarization, exclude_asns_from_summarization, exclude_unknown_asns_from_summarization, exclude_ips_from_summarization, sample_rate_scaling, recorded_stream, deep_sample_rate, num_periods, topn_count, topn_percentile_threshold"); + REQUIRE_THROWS_WITH(flow_handler.start(), "invalid_config is an invalid/unsupported config or filter. 
The valid configs/filters are: device_map, enrichment, only_device_interfaces, only_ips, only_ports, geoloc_notfound, asn_notfound, summarize_ips_by_asn, subnets_for_summarization, exclude_ips_from_summarization, sample_rate_scaling, recorded_stream, deep_sample_rate, num_periods, topn_count, topn_percentile_threshold"); } \ No newline at end of file diff --git a/src/handlers/input_resources/InputResourcesStreamHandler.cpp b/src/handlers/input_resources/InputResourcesStreamHandler.cpp index e0c1eb593..1e9197aa2 100644 --- a/src/handlers/input_resources/InputResourcesStreamHandler.cpp +++ b/src/handlers/input_resources/InputResourcesStreamHandler.cpp @@ -165,24 +165,6 @@ void InputResourcesMetricsBucket::to_prometheus(std::stringstream &out, Metric:: _handler_count.to_prometheus(out, add_labels); } -void InputResourcesMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels) const -{ - { - auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - - event_rate->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_events->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_samples->to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - std::shared_lock r_lock(_mutex); - - _cpu_usage.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _memory_bytes.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _policy_count.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _handler_count.to_opentelemetry(scope, start_ts, end_ts, add_labels); -} - void InputResourcesMetricsBucket::to_json(json &j) const { bool live_rates = !read_only() && !recorded_stream(); diff --git a/src/handlers/input_resources/InputResourcesStreamHandler.h b/src/handlers/input_resources/InputResourcesStreamHandler.h index da1c8f077..80f3a7d60 100644 --- a/src/handlers/input_resources/InputResourcesStreamHandler.h +++ 
b/src/handlers/input_resources/InputResourcesStreamHandler.h @@ -53,7 +53,6 @@ class InputResourcesMetricsBucket final : public visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const override; void update_topn_metrics(size_t, uint64_t) override { } diff --git a/src/handlers/input_resources/ThreadMonitor.h b/src/handlers/input_resources/ThreadMonitor.h index c8036433c..9eb9a28f5 100644 --- a/src/handlers/input_resources/ThreadMonitor.h +++ b/src/handlers/input_resources/ThreadMonitor.h @@ -73,8 +73,6 @@ class ThreadMonitor cpu_usage = 0.0; } return cpu_usage; -#else - return 0; #endif } @@ -100,8 +98,6 @@ class ThreadMonitor file.ignore(std::numeric_limits::max(), '\n'); } return 0; // Nothing found -#else - return 0; #endif } }; diff --git a/src/handlers/mock/MockStreamHandler.cpp b/src/handlers/mock/MockStreamHandler.cpp index 763246e39..0db3c83e5 100644 --- a/src/handlers/mock/MockStreamHandler.cpp +++ b/src/handlers/mock/MockStreamHandler.cpp @@ -78,13 +78,6 @@ void MockMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap a _counters.mock_counter.to_prometheus(out, add_labels); } -void MockMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels) const -{ - std::shared_lock r_lock(_mutex); - - _counters.mock_counter.to_opentelemetry(scope, start_ts, end_ts, add_labels); -} - void MockMetricsBucket::to_json(json &j) const { std::shared_lock r_lock(_mutex); diff --git a/src/handlers/mock/MockStreamHandler.h b/src/handlers/mock/MockStreamHandler.h index 1799ddebd..07e13c0ee 100644 --- a/src/handlers/mock/MockStreamHandler.h +++ 
b/src/handlers/mock/MockStreamHandler.h @@ -7,9 +7,9 @@ #include "AbstractMetricsManager.h" #include "MockInputStream.h" #include "StreamHandler.h" +#include #include #include -#include #include namespace visor::handler::mock { @@ -50,7 +50,6 @@ class MockMetricsBucket final : public visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const override; void update_topn_metrics(size_t, uint64_t) override { } diff --git a/src/handlers/net/CMakeLists.txt b/src/handlers/net/CMakeLists.txt index 82ae25644..a82cf28e5 100644 --- a/src/handlers/net/CMakeLists.txt +++ b/src/handlers/net/CMakeLists.txt @@ -1,4 +1,26 @@ -add_subdirectory(v1) -add_subdirectory(v2) +message(STATUS "Handler Module: Net") -set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} PARENT_SCOPE) \ No newline at end of file +set_directory_properties(PROPERTIES CORRADE_USE_PEDANTIC_FLAGS ON) + +corrade_add_static_plugin(VisorHandlerNet + ${CMAKE_CURRENT_BINARY_DIR} + NetHandler.conf + NetHandlerModulePlugin.cpp + NetStreamHandler.cpp) +add_library(Visor::Handler::Net ALIAS VisorHandlerNet) + +target_include_directories(VisorHandlerNet + INTERFACE + $ + ) + +target_link_libraries(VisorHandlerNet + PUBLIC + Visor::Input::Pcap + Visor::Input::Dnstap + Visor::Input::Mock + ) + +set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} Visor::Handler::Net PARENT_SCOPE) + +add_subdirectory(tests) \ No newline at end of file diff --git a/src/handlers/net/v1/NetHandler.conf b/src/handlers/net/NetHandler.conf similarity index 100% rename from src/handlers/net/v1/NetHandler.conf rename to src/handlers/net/NetHandler.conf diff --git a/src/handlers/net/v1/NetHandlerModulePlugin.cpp 
b/src/handlers/net/NetHandlerModulePlugin.cpp similarity index 93% rename from src/handlers/net/v1/NetHandlerModulePlugin.cpp rename to src/handlers/net/NetHandlerModulePlugin.cpp index e3069ec37..6a9fae8e8 100644 --- a/src/handlers/net/v1/NetHandlerModulePlugin.cpp +++ b/src/handlers/net/NetHandlerModulePlugin.cpp @@ -4,11 +4,11 @@ #include "NetHandlerModulePlugin.h" #include "CoreRegistry.h" -#include "Corrade/PluginManager/AbstractManager.h" #include "HandlerManager.h" #include "InputStreamManager.h" #include "NetStreamHandler.h" -#include "nlohmann/json.hpp" +#include +#include CORRADE_PLUGIN_REGISTER(VisorHandlerNet, visor::handler::net::NetHandlerModulePlugin, "visor.module.handler/1.0") diff --git a/src/handlers/net/v1/NetHandlerModulePlugin.h b/src/handlers/net/NetHandlerModulePlugin.h similarity index 100% rename from src/handlers/net/v1/NetHandlerModulePlugin.h rename to src/handlers/net/NetHandlerModulePlugin.h diff --git a/src/handlers/net/v1/NetStreamHandler.cpp b/src/handlers/net/NetStreamHandler.cpp similarity index 79% rename from src/handlers/net/v1/NetStreamHandler.cpp rename to src/handlers/net/NetStreamHandler.cpp index 48bf4d900..c4adc603b 100644 --- a/src/handlers/net/v1/NetStreamHandler.cpp +++ b/src/handlers/net/NetStreamHandler.cpp @@ -3,10 +3,9 @@ * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ #include "NetStreamHandler.h" -#include "Corrade/Utility/Debug.h" #include "HandlerModulePlugin.h" #include "utils.h" - +#include #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpedantic" @@ -15,16 +14,15 @@ #pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant" #pragma clang diagnostic ignored "-Wc99-extensions" #endif -#include "IPv4Layer.h" -#include "IPv6Layer.h" -#include "PacketUtils.h" -#include "TimespecTimeval.h" -#include "VisorTcpLayer.h" +#include "TcpLayer.h" +#include +#include +#include #ifdef __GNUC__ #pragma GCC diagnostic pop #endif -#include "cpc_union.hpp" -#include "fmt/format.h" +#include +#include namespace visor::handler::net { @@ -93,31 +91,12 @@ void NetStreamHandler::start() if (_pcap_proxy) { _pkt_connection = _pcap_proxy->packet_signal.connect(&NetStreamHandler::process_packet_cb, this); - _pkt_tcp_reassembled_connection = _pcap_proxy->tcp_reassembled_signal.connect(&NetStreamHandler::process_tcp_reassembled_packet_cb, this); _start_tstamp_connection = _pcap_proxy->start_tstamp_signal.connect(&NetStreamHandler::set_start_tstamp, this); _end_tstamp_connection = _pcap_proxy->end_tstamp_signal.connect(&NetStreamHandler::set_end_tstamp, this); _heartbeat_connection = _pcap_proxy->heartbeat_signal.connect([this](const timespec stamp) { check_period_shift(stamp); _event_proxy ? 
_event_proxy->heartbeat_signal(stamp) : void(); }); - // only connect to TCP reassembly data if it is in chaining mode - if (_event_proxy) { - _tcp_start_connection = _pcap_proxy->tcp_connection_start_signal.connect([this](const pcpp::ConnectionData &connectionData, PacketDirection dir) { - if (validate_tcp_data(connectionData, dir, connectionData.startTime)) { - static_cast(_event_proxy.get())->tcp_connection_start_signal(connectionData, dir); - } - }); - _tcp_message_connection = _pcap_proxy->tcp_message_ready_signal.connect([this](int8_t side, const pcpp::TcpStreamData &tcpData, PacketDirection dir) { - if (validate_tcp_data(tcpData.getConnectionData(), dir, tcpData.getTimeStamp())) { - static_cast(_event_proxy.get())->tcp_message_ready_signal(side, tcpData, dir); - } - }); - _tcp_end_connection = _pcap_proxy->tcp_connection_end_signal.connect([this](const pcpp::ConnectionData &connectionData, pcpp::TcpReassembly::ConnectionEndReason reason) { - if (validate_tcp_data(connectionData, PacketDirection::unknown, connectionData.endTime)) { - static_cast(_event_proxy.get())->tcp_connection_end_signal(connectionData, reason); - } - }); - } } else if (_dnstap_proxy) { _dnstap_connection = _dnstap_proxy->dnstap_signal.connect(&NetStreamHandler::process_dnstap_cb, this); _heartbeat_connection = _dnstap_proxy->heartbeat_signal.connect([this](const timespec stamp) { @@ -139,12 +118,6 @@ void NetStreamHandler::stop() _pkt_connection.disconnect(); _start_tstamp_connection.disconnect(); _end_tstamp_connection.disconnect(); - _pkt_tcp_reassembled_connection.disconnect(); - if (_event_proxy) { - _tcp_start_connection.disconnect(); - _tcp_message_connection.disconnect(); - _tcp_end_connection.disconnect(); - } } else if (_dnstap_proxy) { _dnstap_connection.disconnect(); } @@ -168,16 +141,6 @@ void NetStreamHandler::process_packet_cb(pcpp::Packet &payload, PacketDirection } } -void NetStreamHandler::process_tcp_reassembled_packet_cb(pcpp::Packet &payload, PacketDirection dir, 
pcpp::ProtocolType l3, uint32_t flowkey, timespec stamp) -{ - if (!_filtering(payload, dir, stamp)) { - _metrics->process_packet(payload, dir, l3, pcpp::TCP, stamp); - if (_event_proxy) { - static_cast(_event_proxy.get())->tcp_reassembled_signal(payload, dir, l3, flowkey, stamp); - } - } -} - void NetStreamHandler::set_start_tstamp(timespec stamp) { _metrics->set_start_tstamp(stamp); @@ -194,22 +157,6 @@ void NetStreamHandler::set_end_tstamp(timespec stamp) } } -bool NetStreamHandler::validate_tcp_data(const pcpp::ConnectionData &connectionData, PacketDirection dir, timeval timeInterval) -{ - pcpp::Packet packet; - if (connectionData.srcIP.isIPv4()) { - packet.addLayer(new pcpp::IPv4Layer(connectionData.srcIP.getIPv4(), connectionData.dstIP.getIPv4()), true); - } else { - packet.addLayer(new pcpp::IPv6Layer(connectionData.srcIP.getIPv6(), connectionData.dstIP.getIPv6()), true); - } - packet.addLayer(new pcpp::TcpLayer(connectionData.srcPort, connectionData.dstPort), true); - - timespec stamp; - TIMEVAL_TO_TIMESPEC(&timeInterval, &stamp); - - return !_filtering(packet, dir, stamp); -} - void NetStreamHandler::process_dnstap_cb(const dnstap::Dnstap &payload, size_t size) { _metrics->process_dnstap(payload, size); @@ -387,63 +334,6 @@ void NetworkMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMa _payload_size.to_prometheus(out, add_labels); } -void NetworkMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels) const -{ - _rate_in.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _rate_out.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _rate_total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _throughput_in.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _throughput_out.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _throughput_total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - - { - auto [num_events, num_samples, 
event_rate, event_lock] = event_data_locked(); // thread safe - - event_rate->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_events->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_samples->to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - std::shared_lock r_lock(_mutex); - - if (group_enabled(group::NetMetrics::Counters)) { - _counters.UDP.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.TCP.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.TCP_SYN.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.OtherL4.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.IPv4.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.IPv6.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.total_in.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.total_out.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.total_unk.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.total.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _counters.filtered.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - if (group_enabled(group::NetMetrics::Cardinality)) { - _srcIPCard.to_opentelemetry(scope, start_ts, end_ts, add_labels); - _dstIPCard.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - if (group_enabled(group::NetMetrics::TopIps)) { - _topIPv4.to_opentelemetry(scope, start_ts, end_ts, add_labels, [](const uint32_t &val) { return pcpp::IPv4Address(val).toString(); }); - _topIPv6.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - if (group_enabled(group::NetMetrics::TopGeo)) { - _topGeoLoc.to_opentelemetry(scope, start_ts, end_ts, add_labels, [](Metric::LabelMap &l, const std::string &key, const visor::geo::City &val) { - l[key] = val.location; - if (!val.latitude.empty() && !val.longitude.empty()) { - l["lat"] = val.latitude; - l["lon"] = val.longitude; - } - }); - 
_topASN.to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - _payload_size.to_opentelemetry(scope, start_ts, end_ts, add_labels); -} - void NetworkMetricsBucket::to_json(json &j) const { @@ -528,15 +418,17 @@ void NetworkMetricsBucket::process_packet(bool deep, pcpp::Packet &payload, Pack } } - NetworkPacket packet(dir, l3, l4, payload.getRawPacket()->getRawDataLen(), syn_flag); + NetworkPacket packet(dir, l3, l4, payload.getRawPacket()->getRawDataLen(), syn_flag, false); if (auto IP4layer = payload.getLayerOfType(); IP4layer) { + packet.is_ipv6 = false; if (dir == PacketDirection::toHost) { packet.ipv4_in = IP4layer->getSrcIPv4Address(); } else if (dir == PacketDirection::fromHost) { packet.ipv4_out = IP4layer->getDstIPv4Address(); } } else if (auto IP6layer = payload.getLayerOfType(); IP6layer) { + packet.is_ipv6 = true; if (dir == PacketDirection::toHost) { packet.ipv6_in = IP6layer->getSrcIPv6Address(); } else if (dir == PacketDirection::fromHost) { @@ -549,9 +441,11 @@ void NetworkMetricsBucket::process_packet(bool deep, pcpp::Packet &payload, Pack void NetworkMetricsBucket::process_dnstap(bool deep, const dnstap::Dnstap &payload, size_t size) { pcpp::ProtocolType l3{pcpp::UnknownProtocol}; + bool is_ipv6{false}; if (payload.message().has_socket_family()) { if (payload.message().socket_family() == dnstap::INET6) { l3 = pcpp::IPv6; + is_ipv6 = true; } else if (payload.message().socket_family() == dnstap::INET) { l3 = pcpp::IPv4; } @@ -601,17 +495,17 @@ void NetworkMetricsBucket::process_dnstap(bool deep, const dnstap::Dnstap &paylo process_net_layer(dir, l3, l4, size); return; } - NetworkPacket packet(dir, l3, l4, size, false); + NetworkPacket packet(dir, l3, l4, size, false, is_ipv6); - if (l3 == pcpp::IPv4 && payload.message().has_query_address() && payload.message().query_address().size() == 4) { + if (!is_ipv6 && payload.message().has_query_address() && payload.message().query_address().size() == 4) { packet.ipv4_in = 
pcpp::IPv4Address(reinterpret_cast(payload.message().query_address().data())); - } else if (l3 == pcpp::IPv6 && payload.message().has_query_address() && payload.message().query_address().size() == 16) { + } else if (is_ipv6 && payload.message().has_query_address() && payload.message().query_address().size() == 16) { packet.ipv6_in = pcpp::IPv6Address(reinterpret_cast(payload.message().query_address().data())); } - if (l3 == pcpp::IPv4 && payload.message().has_response_address() && payload.message().response_address().size() == 4) { + if (!is_ipv6 && payload.message().has_response_address() && payload.message().response_address().size() == 4) { packet.ipv4_out = pcpp::IPv4Address(reinterpret_cast(payload.message().response_address().data())); - } else if (l3 == pcpp::IPv6 && payload.message().has_response_address() && payload.message().response_address().size() == 16) { + } else if (is_ipv6 && payload.message().has_response_address() && payload.message().response_address().size() == 16) { packet.ipv6_out = pcpp::IPv6Address(reinterpret_cast(payload.message().response_address().data())); } @@ -742,21 +636,21 @@ void NetworkMetricsBucket::process_net_layer(NetworkPacket &packet) _payload_size.update(packet.payload_size); - if (packet.l3 == pcpp::IPv4 && packet.ipv4_in.isValid()) { + if (!packet.is_ipv6 && packet.ipv4_in.isValid()) { group_enabled(group::NetMetrics::Cardinality) ? _srcIPCard.update(packet.ipv4_in.toInt()) : void(); group_enabled(group::NetMetrics::TopIps) ? _topIPv4.update(packet.ipv4_in.toInt()) : void(); _process_geo_metrics(packet.ipv4_in); - } else if (packet.l3 == pcpp::IPv6 && packet.ipv6_in.isValid()) { + } else if (packet.is_ipv6 && packet.ipv6_in.isValid()) { group_enabled(group::NetMetrics::Cardinality) ? _srcIPCard.update(reinterpret_cast(packet.ipv6_in.toBytes()), 16) : void(); group_enabled(group::NetMetrics::TopIps) ? 
_topIPv6.update(packet.ipv6_in.toString()) : void(); _process_geo_metrics(packet.ipv6_in); } - if (packet.l3 == pcpp::IPv4 && packet.ipv4_out.isValid()) { + if (!packet.is_ipv6 && packet.ipv4_out.isValid()) { group_enabled(group::NetMetrics::Cardinality) ? _dstIPCard.update(packet.ipv4_out.toInt()) : void(); group_enabled(group::NetMetrics::TopIps) ? _topIPv4.update(packet.ipv4_out.toInt()) : void(); _process_geo_metrics(packet.ipv4_out); - } else if (packet.l3 == pcpp::IPv6 && packet.ipv6_out.isValid()) { + } else if (packet.is_ipv6 && packet.ipv6_out.isValid()) { group_enabled(group::NetMetrics::Cardinality) ? _dstIPCard.update(reinterpret_cast(packet.ipv6_out.toBytes()), 16) : void(); group_enabled(group::NetMetrics::TopIps) ? _topIPv6.update(packet.ipv6_out.toString()) : void(); _process_geo_metrics(packet.ipv6_out); @@ -766,7 +660,7 @@ void NetworkMetricsBucket::process_net_layer(NetworkPacket &packet) inline void NetworkMetricsBucket::_process_geo_metrics(const pcpp::IPv4Address &ipv4) { if ((HandlerModulePlugin::asn->enabled() || HandlerModulePlugin::city->enabled()) && group_enabled(group::NetMetrics::TopGeo)) { - sockaddr_in sa4{}; + struct sockaddr_in sa4; if (lib::utils::ipv4_to_sockaddr(ipv4, &sa4)) { if (HandlerModulePlugin::city->enabled()) { _topGeoLoc.update(HandlerModulePlugin::city->getGeoLoc(&sa4)); @@ -781,7 +675,7 @@ inline void NetworkMetricsBucket::_process_geo_metrics(const pcpp::IPv4Address & inline void NetworkMetricsBucket::_process_geo_metrics(const pcpp::IPv6Address &ipv6) { if ((HandlerModulePlugin::asn->enabled() || HandlerModulePlugin::city->enabled()) && group_enabled(group::NetMetrics::TopGeo)) { - sockaddr_in6 sa6{}; + struct sockaddr_in6 sa6; if (lib::utils::ipv6_to_sockaddr(ipv6, &sa6)) { if (HandlerModulePlugin::city->enabled()) { _topGeoLoc.update(HandlerModulePlugin::city->getGeoLoc(&sa6)); diff --git a/src/handlers/net/v1/NetStreamHandler.h b/src/handlers/net/NetStreamHandler.h similarity index 92% rename from 
src/handlers/net/v1/NetStreamHandler.h rename to src/handlers/net/NetStreamHandler.h index dcc1a8724..ed723792e 100644 --- a/src/handlers/net/v1/NetStreamHandler.h +++ b/src/handlers/net/NetStreamHandler.h @@ -5,12 +5,12 @@ #pragma once #include "AbstractMetricsManager.h" -#include "Corrade/Utility/Debug.h" #include "DnstapInputStream.h" #include "GeoDB.h" #include "MockInputStream.h" #include "PcapInputStream.h" #include "StreamHandler.h" +#include #include namespace visor::handler::net { @@ -36,17 +36,19 @@ struct NetworkPacket { pcpp::ProtocolType l4; size_t payload_size; bool syn_flag; + bool is_ipv6; pcpp::IPv4Address ipv4_in; pcpp::IPv4Address ipv4_out; pcpp::IPv6Address ipv6_in; pcpp::IPv6Address ipv6_out; - NetworkPacket(PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, size_t payload_size, bool syn_flag) + NetworkPacket(PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, size_t payload_size, bool syn_flag, bool is_ipv6) : dir(dir) , l3(l3) , l4(l4) , payload_size(payload_size) , syn_flag(syn_flag) + , is_ipv6(is_ipv6) { } }; @@ -139,7 +141,6 @@ class NetworkMetricsBucket final : public visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const override; void update_topn_metrics(size_t topn_count, uint64_t percentile_threshold) override { _topGeoLoc.set_settings(topn_count, percentile_threshold); @@ -191,13 +192,10 @@ class NetStreamHandler final : public visor::StreamMetricsHandler \ No newline at end of file diff --git a/src/handlers/net/v1/tests/CMakeLists.txt b/src/handlers/net/tests/CMakeLists.txt similarity index 100% rename from src/handlers/net/v1/tests/CMakeLists.txt rename to 
src/handlers/net/tests/CMakeLists.txt diff --git a/src/handlers/net/v2/tests/main.cpp b/src/handlers/net/tests/main.cpp similarity index 80% rename from src/handlers/net/v2/tests/main.cpp rename to src/handlers/net/tests/main.cpp index cd298f86e..3ab9e7842 100644 --- a/src/handlers/net/v2/tests/main.cpp +++ b/src/handlers/net/tests/main.cpp @@ -1,8 +1,8 @@ #define CATCH_CONFIG_RUNNER -#include "catch2/catch.hpp" -#include "spdlog/sinks/stdout_color_sinks.h" -#include "spdlog/spdlog.h" +#include #include +#include +#include int main(int argc, char *argv[]) { diff --git a/src/handlers/net/v1/tests/test_json_schema.cpp b/src/handlers/net/tests/test_json_schema.cpp similarity index 92% rename from src/handlers/net/v1/tests/test_json_schema.cpp rename to src/handlers/net/tests/test_json_schema.cpp index 9ae5d3c9f..b2f9e5765 100644 --- a/src/handlers/net/v1/tests/test_json_schema.cpp +++ b/src/handlers/net/tests/test_json_schema.cpp @@ -2,14 +2,14 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ -#include "catch2/catch.hpp" -#include "nlohmann/json-schema.hpp" +#include #include +#include #include #include -#include "PcapInputStream.h" #include "NetStreamHandler.h" +#include "PcapInputStream.h" using namespace visor::handler::net; using namespace visor::input::pcap; @@ -40,7 +40,7 @@ TEST_CASE("Net JSON Schema", "[net][iface][json]") json net_json; net_handler.metrics()->window_merged_json(net_json, net_handler.schema_key(), 5); - std::ifstream sfile("handlers/net/v1/tests/window-schema.json"); + std::ifstream sfile("handlers/net/tests/window-schema.json"); CHECK(sfile.is_open()); std::string schema; diff --git a/src/handlers/net/v1/tests/test_net_layer.cpp b/src/handlers/net/tests/test_net_layer.cpp similarity index 89% rename from src/handlers/net/v1/tests/test_net_layer.cpp rename to src/handlers/net/tests/test_net_layer.cpp index df36c2360..b37ac6164 100644 --- a/src/handlers/net/v1/tests/test_net_layer.cpp +++ b/src/handlers/net/tests/test_net_layer.cpp @@ -1,10 +1,10 @@ -#include "catch2/catch.hpp" +#include #include "DnsStreamHandler.h" #include "DnstapInputStream.h" #include "GeoDB.h" -#include "PcapInputStream.h" #include "NetStreamHandler.h" +#include "PcapInputStream.h" using namespace visor::handler::net; using namespace visor::handler::dns; @@ -175,7 +175,7 @@ TEST_CASE("Parse net (dns) random UDP/TCP tests", "[pcap][net]") CHECK(j["cardinality"]["src_ips_in"] == 1); CHECK(j["top_ipv4"][0]["estimate"] == 16147); CHECK(j["top_ipv4"][0]["name"] == "8.8.8.8"); - CHECK(j["payload_size"]["p50"] >= 66); + CHECK(j["payload_size"]["p50"] == 74); } TEST_CASE("Parse net (dns) with DNS filter only_qname_suffix", "[pcap][dns][net]") @@ -264,61 +264,6 @@ TEST_CASE("Parse DNS with NET filter geo", "[pcap][dns][net]") CHECK(dns_counters.IPv4.value() == 24); } -TEST_CASE("Parse DNS TCP data with NET filter geo", "[pcap][dns][net]") -{ - CHECK_NOTHROW(visor::geo::GeoIP().enable("tests/fixtures/GeoIP2-City-Test.mmdb")); - 
CHECK_NOTHROW(visor::geo::GeoASN().enable("tests/fixtures/GeoIP2-ISP-Test.mmdb")); - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_ipv4_tcp.pcap"); - stream.config_set("bpf", ""); - stream.config_set("host_spec", "127.0.0.0/24"); - stream.parse_host_spec(); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - net_handler.set_event_proxy(stream.create_event_proxy(c)); - DnsStreamHandler dns_handler{"dns-test", net_handler.get_event_proxy(), &c}; - dns_handler.set_event_proxy(stream.create_event_proxy(c)); - NetStreamHandler net_handler_2{"net-test-2", dns_handler.get_event_proxy(), &c}; - net_handler_2.set_event_proxy(stream.create_event_proxy(c)); - DnsStreamHandler dns_handler_2{"dns-test-2", net_handler_2.get_event_proxy(), &c}; - - net_handler.config_set("geoloc_notfound", true); - - dns_handler_2.start(); - net_handler_2.start(); - dns_handler.start(); - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - dns_handler.stop(); - net_handler_2.stop(); - dns_handler_2.stop(); - - auto net_counters = net_handler.metrics()->bucket(0)->counters(); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(event_data.num_events->value() == 2310); - CHECK(net_counters.TCP.value() == 2100); - CHECK(net_counters.IPv4.value() == 2100); - - auto dns_counters = dns_handler.metrics()->bucket(0)->counters(); - CHECK(dns_counters.TCP.value() == 420); - CHECK(dns_counters.IPv4.value() == 420); - - auto net_counters_2 = net_handler_2.metrics()->bucket(0)->counters(); - CHECK(net_counters_2.TCP.value() == 420); - CHECK(net_counters_2.IPv4.value() == 420); - - auto dns_counters_2 = dns_handler_2.metrics()->bucket(0)->counters(); - CHECK(dns_counters_2.TCP.value() == 420); - CHECK(dns_counters_2.IPv4.value() == 420); -} - TEST_CASE("Parse net dnstap stream", 
"[dnstap][net][!mayfail]") { diff --git a/src/handlers/net/v1/tests/window-schema.json b/src/handlers/net/tests/window-schema.json similarity index 100% rename from src/handlers/net/v1/tests/window-schema.json rename to src/handlers/net/tests/window-schema.json diff --git a/src/handlers/net/v1/CMakeLists.txt b/src/handlers/net/v1/CMakeLists.txt deleted file mode 100644 index e5e6ed287..000000000 --- a/src/handlers/net/v1/CMakeLists.txt +++ /dev/null @@ -1,27 +0,0 @@ -message(STATUS "Handler Module: Net") - -set_directory_properties(PROPERTIES CORRADE_USE_PEDANTIC_FLAGS ON) - -corrade_add_static_plugin(VisorHandlerNet - ${CMAKE_CURRENT_BINARY_DIR} - NetHandler.conf - NetHandlerModulePlugin.cpp - NetStreamHandler.cpp) -add_library(Visor::Handler::Net ALIAS VisorHandlerNet) - -target_include_directories(VisorHandlerNet - INTERFACE - $ - ) - -target_link_libraries(VisorHandlerNet - PUBLIC - Visor::Lib::Tcp - Visor::Input::Pcap - Visor::Input::Dnstap - Visor::Input::Mock - ) - -set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} Visor::Handler::Net PARENT_SCOPE) - -add_subdirectory(tests) \ No newline at end of file diff --git a/src/handlers/net/v1/tests/main.cpp b/src/handlers/net/v1/tests/main.cpp deleted file mode 100644 index cd298f86e..000000000 --- a/src/handlers/net/v1/tests/main.cpp +++ /dev/null @@ -1,24 +0,0 @@ -#define CATCH_CONFIG_RUNNER -#include "catch2/catch.hpp" -#include "spdlog/sinks/stdout_color_sinks.h" -#include "spdlog/spdlog.h" -#include - -int main(int argc, char *argv[]) -{ - Catch::Session session; - - auto logger = spdlog::get("visor"); - if (!logger) { - spdlog::stderr_color_mt("visor"); - } - - int result = session.applyCommandLine(argc, argv); - if (result != 0) { - return result; - } - - result = session.run(); - - return (result == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE); -} diff --git a/src/handlers/net/v2/CMakeLists.txt b/src/handlers/net/v2/CMakeLists.txt deleted file mode 100644 index a26bfa33f..000000000 --- a/src/handlers/net/v2/CMakeLists.txt +++ /dev/null @@ -1,27 +0,0 @@ -message(STATUS "Handler Module: Net v2") - -set_directory_properties(PROPERTIES CORRADE_USE_PEDANTIC_FLAGS ON) - -corrade_add_static_plugin(VisorHandlerNetV2 - ${CMAKE_CURRENT_BINARY_DIR} - NetHandler.conf - NetHandlerModulePlugin.cpp - NetStreamHandler.cpp) -add_library(Visor::Handler::Net::V2 ALIAS VisorHandlerNetV2) - -target_include_directories(VisorHandlerNetV2 - INTERFACE - $ - ) - -target_link_libraries(VisorHandlerNetV2 - PUBLIC - Visor::Lib::Tcp - Visor::Input::Pcap - Visor::Input::Dnstap - Visor::Input::Mock - ) - -set(VISOR_STATIC_PLUGINS ${VISOR_STATIC_PLUGINS} Visor::Handler::Net::V2 PARENT_SCOPE) - -add_subdirectory(tests) \ No newline at end of file diff --git a/src/handlers/net/v2/NetHandler.conf b/src/handlers/net/v2/NetHandler.conf deleted file mode 100644 index 27372c0d7..000000000 --- a/src/handlers/net/v2/NetHandler.conf +++ /dev/null @@ -1,6 +0,0 @@ -# Aliases -provides=net -[data] -desc=Network (L3-L4) analyzer -type=handler -version=2.0 diff --git a/src/handlers/net/v2/NetHandlerModulePlugin.cpp b/src/handlers/net/v2/NetHandlerModulePlugin.cpp deleted file mode 100644 index ff66be294..000000000 --- a/src/handlers/net/v2/NetHandlerModulePlugin.cpp +++ /dev/null @@ -1,33 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ - -#include "NetHandlerModulePlugin.h" -#include "CoreRegistry.h" -#include "Corrade/PluginManager/AbstractManager.h" -#include "HandlerManager.h" -#include "InputStreamManager.h" -#include "NetStreamHandler.h" -#include "nlohmann/json.hpp" - -CORRADE_PLUGIN_REGISTER(VisorHandlerNetV2, visor::handler::net::v2::NetHandlerModulePlugin, - "visor.module.handler/1.0") - -namespace visor::handler::net::v2 { - -using namespace visor::input::pcap; -using json = nlohmann::json; - -void NetHandlerModulePlugin::setup_routes(HttpServer *) -{ -} -std::unique_ptr NetHandlerModulePlugin::instantiate(const std::string &name, InputEventProxy *proxy, const Configurable *config, const Configurable *filter) -{ - // TODO using config as both window config and module config - auto handler_module = std::make_unique(name, proxy, config); - handler_module->config_merge(*config); - handler_module->config_merge(*filter); - return handler_module; -} - -} \ No newline at end of file diff --git a/src/handlers/net/v2/NetHandlerModulePlugin.h b/src/handlers/net/v2/NetHandlerModulePlugin.h deleted file mode 100644 index 6ef260326..000000000 --- a/src/handlers/net/v2/NetHandlerModulePlugin.h +++ /dev/null @@ -1,25 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ - -#pragma once - -#include "HandlerModulePlugin.h" - -namespace visor::handler::net::v2 { - -class NetHandlerModulePlugin : public HandlerModulePlugin -{ - -protected: - void setup_routes(HttpServer *svr) override; - -public: - explicit NetHandlerModulePlugin(Corrade::PluginManager::AbstractManager &manager, const std::string &plugin) - : visor::HandlerModulePlugin{manager, plugin} - { - } - - std::unique_ptr instantiate(const std::string &name, InputEventProxy *proxy, const Configurable *config, const Configurable *filter) override; -}; -} diff --git a/src/handlers/net/v2/NetStreamHandler.cpp b/src/handlers/net/v2/NetStreamHandler.cpp deleted file mode 100644 index 64517dfef..000000000 --- a/src/handlers/net/v2/NetStreamHandler.cpp +++ /dev/null @@ -1,797 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ - -#include "NetStreamHandler.h" -#include "Corrade/Utility/Debug.h" -#include "HandlerModulePlugin.h" -#include "utils.h" - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpedantic" -#pragma GCC diagnostic ignored "-Wold-style-cast" -#pragma GCC diagnostic ignored "-Wunused-parameter" -#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant" -#pragma clang diagnostic ignored "-Wc99-extensions" -#endif -#include "IPv4Layer.h" -#include "IPv6Layer.h" -#include "PacketUtils.h" -#include "TimespecTimeval.h" -#include "VisorTcpLayer.h" -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif -#include "cpc_union.hpp" -#include "fmt/format.h" - -namespace visor::handler::net::v2 { - -NetStreamHandler::NetStreamHandler(const std::string &name, InputEventProxy *proxy, const Configurable *window_config) - : visor::StreamMetricsHandler(name, window_config) -{ - // figure out which input event proxy we have - if (proxy) { - _pcap_proxy = dynamic_cast(proxy); - 
_dnstap_proxy = dynamic_cast(proxy); - _mock_proxy = dynamic_cast(proxy); - if (!_pcap_proxy && !_dnstap_proxy && !_mock_proxy) { - throw StreamHandlerException(fmt::format("NetStreamHandler: unsupported input event proxy {}", proxy->name())); - } - } -} - -void NetStreamHandler::start() -{ - if (_running) { - return; - } - - validate_configs(_config_defs); - - // default enabled groups - _groups.set(group::NetMetrics::Counters); - _groups.set(group::NetMetrics::Cardinality); - _groups.set(group::NetMetrics::Quantiles); - _groups.set(group::NetMetrics::TopGeo); - _groups.set(group::NetMetrics::TopIps); - - process_groups(_group_defs); - - // Setup Filters - if (config_exists("geoloc_notfound") && config_get("geoloc_notfound")) { - _f_enabled.set(Filters::GeoLocNotFound); - _f_geoloc_prefix.push_back("Unknown"); - } - - if (config_exists("asn_notfound") && config_get("asn_notfound")) { - _f_enabled.set(Filters::AsnNotFound); - _f_asn_number.push_back("Unknown"); - } - - if (config_exists("only_geoloc_prefix")) { - _f_enabled.set(Filters::GeoLocPrefix); - for (const auto &prefix : config_get("only_geoloc_prefix")) { - _f_geoloc_prefix.push_back(prefix); - } - } - - if (config_exists("only_asn_number")) { - _f_enabled.set(Filters::AsnNumber); - for (const auto &number : config_get("only_asn_number")) { - if (std::all_of(number.begin(), number.end(), ::isdigit)) { - _f_asn_number.push_back(number + '/'); - } else { - throw ConfigException(fmt::format("NetStreamHandler: only_asn_number filter contained an invalid/unsupported value: {}", number)); - } - } - } - - if (config_exists("recorded_stream")) { - _metrics->set_recorded_stream(); - } - - if (_pcap_proxy) { - _pkt_connection = _pcap_proxy->packet_signal.connect(&NetStreamHandler::process_packet_cb, this); - _pkt_tcp_reassembled_connection = _pcap_proxy->tcp_reassembled_signal.connect(&NetStreamHandler::process_tcp_reassembled_packet_cb, this); - _start_tstamp_connection = 
_pcap_proxy->start_tstamp_signal.connect(&NetStreamHandler::set_start_tstamp, this); - _end_tstamp_connection = _pcap_proxy->end_tstamp_signal.connect(&NetStreamHandler::set_end_tstamp, this); - _heartbeat_connection = _pcap_proxy->heartbeat_signal.connect([this](const timespec stamp) { - check_period_shift(stamp); - _event_proxy ? _event_proxy->heartbeat_signal(stamp) : void(); - }); - // only connect to TCP reassembly data if it is in chaining mode - if (_event_proxy) { - _tcp_start_connection = _pcap_proxy->tcp_connection_start_signal.connect([this](const pcpp::ConnectionData &connectionData, PacketDirection dir) { - if (validate_tcp_data(connectionData, dir, connectionData.startTime)) { - static_cast(_event_proxy.get())->tcp_connection_start_signal(connectionData, dir); - } - }); - _tcp_message_connection = _pcap_proxy->tcp_message_ready_signal.connect([this](int8_t side, const pcpp::TcpStreamData &tcpData, PacketDirection dir) { - if (validate_tcp_data(tcpData.getConnectionData(), dir, tcpData.getTimeStamp())) { - static_cast(_event_proxy.get())->tcp_message_ready_signal(side, tcpData, dir); - } - }); - _tcp_end_connection = _pcap_proxy->tcp_connection_end_signal.connect([this](const pcpp::ConnectionData &connectionData, pcpp::TcpReassembly::ConnectionEndReason reason) { - if (validate_tcp_data(connectionData, PacketDirection::unknown, connectionData.endTime)) { - static_cast(_event_proxy.get())->tcp_connection_end_signal(connectionData, reason); - } - }); - } - } else if (_dnstap_proxy) { - _dnstap_connection = _dnstap_proxy->dnstap_signal.connect(&NetStreamHandler::process_dnstap_cb, this); - _heartbeat_connection = _dnstap_proxy->heartbeat_signal.connect([this](const timespec stamp) { - check_period_shift(stamp); - _event_proxy ? 
_event_proxy->heartbeat_signal(stamp) : void(); - }); - } - - _running = true; -} - -void NetStreamHandler::stop() -{ - if (!_running) { - return; - } - - if (_pcap_proxy) { - _pkt_connection.disconnect(); - _start_tstamp_connection.disconnect(); - _end_tstamp_connection.disconnect(); - _pkt_tcp_reassembled_connection.disconnect(); - if (_event_proxy) { - _tcp_start_connection.disconnect(); - _tcp_message_connection.disconnect(); - _tcp_end_connection.disconnect(); - } - } else if (_dnstap_proxy) { - _dnstap_connection.disconnect(); - } - _heartbeat_connection.disconnect(); - - _running = false; -} - -NetStreamHandler::~NetStreamHandler() -{ -} - -// callback from input module -void NetStreamHandler::process_packet_cb(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, timespec stamp) -{ - if (!_filtering(payload, dir, stamp)) { - _metrics->process_packet(payload, dir, l3, l4, stamp); - if (_event_proxy && l4 == pcpp::UDP) { - static_cast(_event_proxy.get())->udp_signal(payload, dir, l3, pcpp::hash5Tuple(&payload), stamp); - } - } -} - -void NetStreamHandler::process_tcp_reassembled_packet_cb(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, uint32_t flowkey, timespec stamp) -{ - if (!_filtering(payload, dir, stamp)) { - _metrics->process_packet(payload, dir, l3, pcpp::TCP, stamp); - if (_event_proxy) { - static_cast(_event_proxy.get())->tcp_reassembled_signal(payload, dir, l3, flowkey, stamp); - } - } -} - -void NetStreamHandler::set_start_tstamp(timespec stamp) -{ - _metrics->set_start_tstamp(stamp); - if (_event_proxy) { - static_cast(_event_proxy.get())->start_tstamp_signal(stamp); - } -} - -void NetStreamHandler::set_end_tstamp(timespec stamp) -{ - _metrics->set_end_tstamp(stamp); - if (_event_proxy) { - static_cast(_event_proxy.get())->end_tstamp_signal(stamp); - } -} - -bool NetStreamHandler::validate_tcp_data(const pcpp::ConnectionData &connectionData, PacketDirection dir, timeval timeInterval) -{ - 
pcpp::Packet packet; - if (connectionData.srcIP.isIPv4()) { - packet.addLayer(new pcpp::IPv4Layer(connectionData.srcIP.getIPv4(), connectionData.dstIP.getIPv4()), true); - } else { - packet.addLayer(new pcpp::IPv6Layer(connectionData.srcIP.getIPv6(), connectionData.dstIP.getIPv6()), true); - } - packet.addLayer(new pcpp::TcpLayer(connectionData.srcPort, connectionData.dstPort), true); - - timespec stamp; - TIMEVAL_TO_TIMESPEC(&timeInterval, &stamp); - - return !_filtering(packet, dir, stamp); -} - -void NetStreamHandler::process_dnstap_cb(const dnstap::Dnstap &payload, size_t size) -{ - _metrics->process_dnstap(payload, size); -} - -static inline bool begins_with(std::string_view str, std::string_view prefix) -{ - return str.size() >= prefix.size() && 0 == str.compare(0, prefix.size(), prefix); -} - -bool NetStreamHandler::_filtering(pcpp::Packet &payload, PacketDirection dir, timespec stamp) -{ - if (_f_enabled[Filters::GeoLocPrefix] || _f_enabled[Filters::GeoLocNotFound]) { - if (!HandlerModulePlugin::city->enabled() || dir == PacketDirection::unknown) { - goto will_filter; - } else if (auto IPv4Layer = payload.getLayerOfType(); IPv4Layer) { - struct sockaddr_in sa4; - if (dir == PacketDirection::toHost && lib::utils::ipv4_to_sockaddr(IPv4Layer->getSrcIPv4Address(), &sa4) && std::none_of(_f_geoloc_prefix.begin(), _f_geoloc_prefix.end(), [sa4](const auto &prefix) { - return begins_with(HandlerModulePlugin::city->getGeoLoc(&sa4).location, prefix); - })) { - goto will_filter; - } else if (dir == PacketDirection::fromHost && lib::utils::ipv4_to_sockaddr(IPv4Layer->getDstIPv4Address(), &sa4) && std::none_of(_f_geoloc_prefix.begin(), _f_geoloc_prefix.end(), [sa4](const auto &prefix) { - return begins_with(HandlerModulePlugin::city->getGeoLoc(&sa4).location, prefix); - })) { - goto will_filter; - } - } else if (auto IPv6layer = payload.getLayerOfType(); IPv6layer) { - struct sockaddr_in6 sa6; - if (dir == PacketDirection::toHost && 
lib::utils::ipv6_to_sockaddr(IPv6layer->getSrcIPv6Address(), &sa6) && std::none_of(_f_geoloc_prefix.begin(), _f_geoloc_prefix.end(), [sa6](const auto &prefix) { - return begins_with(HandlerModulePlugin::city->getGeoLoc(&sa6).location, prefix); - })) { - goto will_filter; - } else if (dir == PacketDirection::fromHost && lib::utils::ipv6_to_sockaddr(IPv6layer->getDstIPv6Address(), &sa6) && std::none_of(_f_geoloc_prefix.begin(), _f_geoloc_prefix.end(), [sa6](const auto &prefix) { - return begins_with(HandlerModulePlugin::city->getGeoLoc(&sa6).location, prefix); - })) { - goto will_filter; - } - } - } - if (_f_enabled[Filters::AsnNumber] || _f_enabled[Filters::AsnNotFound]) { - if (!HandlerModulePlugin::asn->enabled() || dir == PacketDirection::unknown) { - goto will_filter; - } else if (auto IPv4Layer = payload.getLayerOfType(); IPv4Layer) { - struct sockaddr_in sa4; - if (dir == PacketDirection::toHost && lib::utils::ipv4_to_sockaddr(IPv4Layer->getSrcIPv4Address(), &sa4) && std::none_of(_f_asn_number.begin(), _f_asn_number.end(), [sa4](const auto &prefix) { - return begins_with(HandlerModulePlugin::asn->getASNString(&sa4), prefix); - })) { - goto will_filter; - } else if (dir == PacketDirection::fromHost && lib::utils::ipv4_to_sockaddr(IPv4Layer->getDstIPv4Address(), &sa4) && std::none_of(_f_asn_number.begin(), _f_asn_number.end(), [sa4](const auto &prefix) { - return begins_with(HandlerModulePlugin::asn->getASNString(&sa4), prefix); - })) { - goto will_filter; - } - } else if (auto IPv6layer = payload.getLayerOfType(); IPv6layer) { - struct sockaddr_in6 sa6; - if (dir == PacketDirection::toHost && lib::utils::ipv6_to_sockaddr(IPv6layer->getSrcIPv6Address(), &sa6) && std::none_of(_f_asn_number.begin(), _f_asn_number.end(), [sa6](const auto &prefix) { - return begins_with(HandlerModulePlugin::asn->getASNString(&sa6), prefix); - })) { - goto will_filter; - } else if (dir == PacketDirection::fromHost && lib::utils::ipv6_to_sockaddr(IPv6layer->getDstIPv6Address(), &sa6) 
&& std::none_of(_f_asn_number.begin(), _f_asn_number.end(), [sa6](const auto &prefix) { - return begins_with(HandlerModulePlugin::asn->getASNString(&sa6), prefix); - })) { - goto will_filter; - } - } - } - return false; -will_filter: - _metrics->process_filtered(stamp); - return true; -} - -void NetworkMetricsBucket::specialized_merge(const AbstractMetricsBucket &o, Metric::Aggregate agg_operator) -{ - // static because caller guarantees only our own bucket type - const auto &other = static_cast(o); - - // generate transaction directions if they do not exist - { - std::unique_lock w_lock(_mutex); - for (auto &net : other._net) { - if (!_net.count(net.first)) { - _net[net.first].update_topn_metrics(_topn_count, _topn_percentile_threshold); - } - } - } - - // rates maintain their own thread safety - if (group_enabled(group::NetMetrics::Quantiles)) { - for (auto &net : other._net) { - _net.at(net.first).rate.merge(net.second.rate, agg_operator); - _net.at(net.first).throughput.merge(net.second.throughput, agg_operator); - } - } - - std::shared_lock r_lock(other._mutex); - std::unique_lock w_lock(_mutex); - - group_enabled(group::NetMetrics::Counters) ? _filtered += other._filtered : void(); - - for (auto &net : other._net) { - group_enabled(group::NetMetrics::Counters) ? _net.at(net.first).counters += net.second.counters : void(); - - group_enabled(group::NetMetrics::Cardinality) ? _net.at(net.first).ipCard.merge(net.second.ipCard) : void(); - - if (group_enabled(group::NetMetrics::TopIps)) { - _net.at(net.first).topIPv4.merge(net.second.topIPv4); - _net.at(net.first).topIPv6.merge(net.second.topIPv6); - } - - if (group_enabled(group::NetMetrics::TopGeo)) { - _net.at(net.first).topGeoLoc.merge(net.second.topGeoLoc); - _net.at(net.first).topASN.merge(net.second.topASN); - } - - group_enabled(group::NetMetrics::Quantiles) ? 
_net.at(net.first).payload_size.merge(net.second.payload_size, agg_operator) : void(); - } -} - -void NetworkMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelMap add_labels) const -{ - - if (group_enabled(group::NetMetrics::Quantiles)) { - for (auto &net : _net) { - auto dir_labels = add_labels; - dir_labels["direction"] = _dir_str.at(net.first); - net.second.rate.to_prometheus(out, dir_labels); - net.second.throughput.to_prometheus(out, dir_labels); - } - } - - { - auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - - event_rate->to_prometheus(out, add_labels); - num_events->to_prometheus(out, add_labels); - num_samples->to_prometheus(out, add_labels); - } - - std::shared_lock r_lock(_mutex); - - group_enabled(group::NetMetrics::Counters) ? _filtered.to_prometheus(out, add_labels) : void(); - - for (auto &net : _net) { - auto dir_labels = add_labels; - dir_labels["direction"] = _dir_str.at(net.first); - - group_enabled(group::NetMetrics::Counters) ? net.second.counters.to_prometheus(out, dir_labels) : void(); - - group_enabled(group::NetMetrics::Cardinality) ? net.second.ipCard.to_prometheus(out, dir_labels) : void(); - - if (group_enabled(group::NetMetrics::TopIps)) { - net.second.topIPv4.to_prometheus(out, dir_labels, [](const uint32_t &val) { return pcpp::IPv4Address(val).toString(); }); - net.second.topIPv6.to_prometheus(out, dir_labels); - } - - if (group_enabled(group::NetMetrics::TopGeo)) { - net.second.topGeoLoc.to_prometheus(out, dir_labels, [](Metric::LabelMap &l, const std::string &key, const visor::geo::City &val) { - l[key] = val.location; - if (!val.latitude.empty() && !val.longitude.empty()) { - l["lat"] = val.latitude; - l["lon"] = val.longitude; - } - }); - net.second.topASN.to_prometheus(out, dir_labels); - } - - group_enabled(group::NetMetrics::Quantiles) ? 
net.second.payload_size.to_prometheus(out, dir_labels) : void(); - } -} - -void NetworkMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels) const -{ - if (group_enabled(group::NetMetrics::Quantiles)) { - for (auto &net : _net) { - auto dir_labels = add_labels; - dir_labels["direction"] = _dir_str.at(net.first); - net.second.rate.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - net.second.throughput.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - } - } - - { - auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - - event_rate->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_events->to_opentelemetry(scope, start_ts, end_ts, add_labels); - num_samples->to_opentelemetry(scope, start_ts, end_ts, add_labels); - } - - std::shared_lock r_lock(_mutex); - - group_enabled(group::NetMetrics::Counters) ? _filtered.to_opentelemetry(scope, start_ts, end_ts, add_labels) : void(); - - for (auto &net : _net) { - auto dir_labels = add_labels; - dir_labels["direction"] = _dir_str.at(net.first); - - group_enabled(group::NetMetrics::Counters) ? net.second.counters.to_opentelemetry(scope, start_ts, end_ts, dir_labels) : void(); - - group_enabled(group::NetMetrics::Cardinality) ? 
net.second.ipCard.to_opentelemetry(scope, start_ts, end_ts, dir_labels) : void(); - - if (group_enabled(group::NetMetrics::TopIps)) { - net.second.topIPv4.to_opentelemetry(scope, start_ts, end_ts, dir_labels, [](const uint32_t &val) { return pcpp::IPv4Address(val).toString(); }); - net.second.topIPv6.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - } - - if (group_enabled(group::NetMetrics::TopGeo)) { - net.second.topGeoLoc.to_opentelemetry(scope, start_ts, end_ts, dir_labels, [](Metric::LabelMap &l, const std::string &key, const visor::geo::City &val) { - l[key] = val.location; - if (!val.latitude.empty() && !val.longitude.empty()) { - l["lat"] = val.latitude; - l["lon"] = val.longitude; - } - }); - net.second.topASN.to_opentelemetry(scope, start_ts, end_ts, dir_labels); - } - - group_enabled(group::NetMetrics::Quantiles) ? net.second.payload_size.to_opentelemetry(scope, start_ts, end_ts, dir_labels) : void(); - } -} - -void NetworkMetricsBucket::to_json(json &j) const -{ - - // do rates first, which handle their own locking - bool live_rates = !read_only() && !recorded_stream(); - if (group_enabled(group::NetMetrics::Quantiles)) { - for (auto &net : _net) { - net.second.rate.to_json(j[_dir_str.at(net.first)], live_rates); - net.second.throughput.to_json(j[_dir_str.at(net.first)], live_rates); - } - } - - { - auto [num_events, num_samples, event_rate, event_lock] = event_data_locked(); // thread safe - - event_rate->to_json(j, live_rates); - num_events->to_json(j); - num_samples->to_json(j); - } - - std::shared_lock r_lock(_mutex); - - group_enabled(group::NetMetrics::Counters) ? _filtered.to_json(j) : void(); - - for (auto &net : _net) { - group_enabled(group::NetMetrics::Counters) ? net.second.counters.to_json(j[_dir_str.at(net.first)]) : void(); - - group_enabled(group::NetMetrics::Cardinality) ? 
net.second.ipCard.to_json(j[_dir_str.at(net.first)]) : void(); - - if (group_enabled(group::NetMetrics::TopIps)) { - net.second.topIPv4.to_json(j[_dir_str.at(net.first)], [](const uint32_t &val) { return pcpp::IPv4Address(val).toString(); }); - net.second.topIPv6.to_json(j[_dir_str.at(net.first)]); - } - - if (group_enabled(group::NetMetrics::TopGeo)) { - net.second.topGeoLoc.to_json(j[_dir_str.at(net.first)], [](json &j, const std::string &key, const visor::geo::City &val) { - j[key] = val.location; - if (!val.latitude.empty() && !val.longitude.empty()) { - j["lat"] = val.latitude; - j["lon"] = val.longitude; - } - }); - net.second.topASN.to_json(j[_dir_str.at(net.first)]); - } - - group_enabled(group::NetMetrics::Quantiles) ? net.second.payload_size.to_json(j[_dir_str.at(net.first)]) : void(); - } -} - -// the main bucket analysis -void NetworkMetricsBucket::process_filtered() -{ - std::unique_lock lock(_mutex); - if (group_enabled(group::NetMetrics::Counters)) { - ++_filtered; - } -} - -void NetworkMetricsBucket::process_packet(bool deep, pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4) -{ - if (!deep) { - process_net_layer(static_cast(dir), l3, l4, payload.getRawPacket()->getRawDataLen()); - return; - } - - bool syn_flag = false; - if (l4 == pcpp::TCP) { - pcpp::TcpLayer *tcpLayer = payload.getLayerOfType(); - if (tcpLayer) { - syn_flag = tcpLayer->getTcpHeader()->synFlag; - } - } - - NetworkPacket packet(static_cast(dir), l3, l4, payload.getRawPacket()->getRawDataLen(), syn_flag); - - if (auto IP4layer = payload.getLayerOfType(); IP4layer) { - if (dir == PacketDirection::toHost) { - packet.ipv4_src = IP4layer->getSrcIPv4Address(); - } else if (dir == PacketDirection::fromHost) { - packet.ipv4_dst = IP4layer->getDstIPv4Address(); - } else { - packet.ipv4_src = IP4layer->getSrcIPv4Address(); - packet.ipv4_dst = IP4layer->getDstIPv4Address(); - } - } else if (auto IP6layer = payload.getLayerOfType(); IP6layer) { - if (dir 
== PacketDirection::toHost) { - packet.ipv6_src = IP6layer->getSrcIPv6Address(); - } else if (dir == PacketDirection::fromHost) { - packet.ipv6_dst = IP6layer->getDstIPv6Address(); - } else { - packet.ipv6_src = IP6layer->getSrcIPv6Address(); - packet.ipv6_dst = IP6layer->getDstIPv6Address(); - } - } - - process_net_layer(packet); -} -void NetworkMetricsBucket::process_dnstap(bool deep, const dnstap::Dnstap &payload, size_t size) -{ - pcpp::ProtocolType l3{pcpp::UnknownProtocol}; - if (payload.message().has_socket_family()) { - if (payload.message().socket_family() == dnstap::INET6) { - l3 = pcpp::IPv6; - } else if (payload.message().socket_family() == dnstap::INET) { - l3 = pcpp::IPv4; - } - } - - pcpp::ProtocolType l4{pcpp::UnknownProtocol}; - if (payload.message().has_socket_protocol()) { - switch (payload.message().socket_protocol()) { - case dnstap::UDP: - l4 = pcpp::UDP; - break; - case dnstap::TCP: - l4 = pcpp::TCP; - break; - case dnstap::DOT: - case dnstap::DOH: - case dnstap::DNSCryptUDP: - case dnstap::DNSCryptTCP: - case dnstap::DOQ: - break; - } - } - - NetworkPacketDirection dir{NetworkPacketDirection::unknown}; - switch (payload.message().type()) { - case dnstap::Message_Type_CLIENT_QUERY: - case dnstap::Message_Type_STUB_RESPONSE: - case dnstap::Message_Type_RESOLVER_RESPONSE: - case dnstap::Message_Type_AUTH_QUERY: - case dnstap::Message_Type_FORWARDER_RESPONSE: - case dnstap::Message_Type_UPDATE_QUERY: - case dnstap::Message_Type_TOOL_RESPONSE: - dir = NetworkPacketDirection::in; - break; - case dnstap::Message_Type_STUB_QUERY: - case dnstap::Message_Type_CLIENT_RESPONSE: - case dnstap::Message_Type_RESOLVER_QUERY: - case dnstap::Message_Type_AUTH_RESPONSE: - case dnstap::Message_Type_FORWARDER_QUERY: - case dnstap::Message_Type_UPDATE_RESPONSE: - case dnstap::Message_Type_TOOL_QUERY: - dir = NetworkPacketDirection::out; - break; - } - - if (!deep) { - process_net_layer(dir, l3, l4, size); - return; - } - NetworkPacket packet(dir, l3, l4, size, 
false); - - if (l3 == pcpp::IPv4 && payload.message().has_query_address() && payload.message().query_address().size() == 4) { - packet.ipv4_src = pcpp::IPv4Address(reinterpret_cast(payload.message().query_address().data())); - } else if (l3 == pcpp::IPv6 && payload.message().has_query_address() && payload.message().query_address().size() == 16) { - packet.ipv6_src = pcpp::IPv6Address(reinterpret_cast(payload.message().query_address().data())); - } - - if (l3 == pcpp::IPv4 && payload.message().has_response_address() && payload.message().response_address().size() == 4) { - packet.ipv4_dst = pcpp::IPv4Address(reinterpret_cast(payload.message().response_address().data())); - } else if (l3 == pcpp::IPv6 && payload.message().has_response_address() && payload.message().response_address().size() == 16) { - packet.ipv6_dst = pcpp::IPv6Address(reinterpret_cast(payload.message().response_address().data())); - } - - process_net_layer(packet); -} - -void NetworkMetricsBucket::process_net_layer(NetworkPacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, size_t payload_size) -{ - std::unique_lock lock(_mutex); - - if (!_net.count(dir)) { - _net[dir].update_topn_metrics(_topn_count, _topn_percentile_threshold); - } - - auto &data = _net[dir]; - - auto payload_size_bits = payload_size * sizeof(uint8_t); - - ++data.rate; - data.throughput += payload_size_bits; - - if (group_enabled(group::NetMetrics::Counters)) { - ++data.counters.total; - - switch (l3) { - case pcpp::IPv6: - ++data.counters.IPv6; - break; - case pcpp::IPv4: - ++data.counters.IPv4; - break; - default: - break; - } - - switch (l4) { - case pcpp::UDP: - ++data.counters.UDP; - break; - case pcpp::TCP: - ++data.counters.TCP; - break; - default: - ++data.counters.OtherL4; - break; - } - } - - data.payload_size.update(payload_size); -} - -void NetworkMetricsBucket::process_net_layer(NetworkPacket &packet) -{ - std::unique_lock lock(_mutex); - - if (!_net.count(packet.dir)) { - 
_net[packet.dir].update_topn_metrics(_topn_count, _topn_percentile_threshold); - } - - auto &data = _net[packet.dir]; - - auto payload_size_bits = packet.payload_size * sizeof(uint8_t); - - ++data.rate; - data.throughput += payload_size_bits; - - if (group_enabled(group::NetMetrics::Counters)) { - ++data.counters.total; - - switch (packet.l3) { - case pcpp::IPv6: - ++data.counters.IPv6; - break; - case pcpp::IPv4: - ++data.counters.IPv4; - break; - default: - break; - } - - switch (packet.l4) { - case pcpp::UDP: - ++data.counters.UDP; - break; - case pcpp::TCP: - ++data.counters.TCP; - if (packet.syn_flag) { - ++data.counters.TCP_SYN; - } - break; - default: - ++data.counters.OtherL4; - break; - } - } - - data.payload_size.update(packet.payload_size); - - if (packet.l3 == pcpp::IPv4 && packet.ipv4_src.isValid()) { - group_enabled(group::NetMetrics::Cardinality) ? data.ipCard.update(packet.ipv4_src.toInt()) : void(); - group_enabled(group::NetMetrics::TopIps) ? data.topIPv4.update(packet.ipv4_src.toInt()) : void(); - _process_geo_metrics(data, packet.ipv4_src); - } else if (packet.l3 == pcpp::IPv6 && packet.ipv6_src.isValid()) { - group_enabled(group::NetMetrics::Cardinality) ? data.ipCard.update(reinterpret_cast(packet.ipv6_src.toBytes()), 16) : void(); - group_enabled(group::NetMetrics::TopIps) ? data.topIPv6.update(packet.ipv6_src.toString()) : void(); - _process_geo_metrics(data, packet.ipv6_src); - } - - if (packet.l3 == pcpp::IPv4 && packet.ipv4_dst.isValid()) { - group_enabled(group::NetMetrics::Cardinality) ? data.ipCard.update(packet.ipv4_dst.toInt()) : void(); - group_enabled(group::NetMetrics::TopIps) ? data.topIPv4.update(packet.ipv4_dst.toInt()) : void(); - _process_geo_metrics(data, packet.ipv4_dst); - } else if (packet.l3 == pcpp::IPv6 && packet.ipv6_dst.isValid()) { - group_enabled(group::NetMetrics::Cardinality) ? data.ipCard.update(reinterpret_cast(packet.ipv6_dst.toBytes()), 16) : void(); - group_enabled(group::NetMetrics::TopIps) ? 
data.topIPv6.update(packet.ipv6_dst.toString()) : void(); - _process_geo_metrics(data, packet.ipv6_dst); - } -} - -inline void NetworkMetricsBucket::_process_geo_metrics(NetworkDirection &net, const pcpp::IPv4Address &ipv4) -{ - if ((HandlerModulePlugin::asn->enabled() || HandlerModulePlugin::city->enabled()) && group_enabled(group::NetMetrics::TopGeo)) { - sockaddr_in sa4{}; - if (lib::utils::ipv4_to_sockaddr(ipv4, &sa4)) { - if (HandlerModulePlugin::city->enabled()) { - net.topGeoLoc.update(HandlerModulePlugin::city->getGeoLoc(&sa4)); - } - if (HandlerModulePlugin::asn->enabled()) { - net.topASN.update(HandlerModulePlugin::asn->getASNString(&sa4)); - } - } - } -} - -inline void NetworkMetricsBucket::_process_geo_metrics(NetworkDirection &net, const pcpp::IPv6Address &ipv6) -{ - if ((HandlerModulePlugin::asn->enabled() || HandlerModulePlugin::city->enabled()) && group_enabled(group::NetMetrics::TopGeo)) { - sockaddr_in6 sa6{}; - if (lib::utils::ipv6_to_sockaddr(ipv6, &sa6)) { - if (HandlerModulePlugin::city->enabled()) { - net.topGeoLoc.update(HandlerModulePlugin::city->getGeoLoc(&sa6)); - } - if (HandlerModulePlugin::asn->enabled()) { - net.topASN.update(HandlerModulePlugin::asn->getASNString(&sa6)); - } - } - } -} - -void NetworkMetricsManager::process_filtered(timespec stamp) -{ - // base event, no sample - new_event(stamp, false); - live_bucket()->process_filtered(); -} - -// the general metrics manager entry point -void NetworkMetricsManager::process_packet(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, timespec stamp) -{ - // base event - new_event(stamp); - // process in the "live" bucket - live_bucket()->process_packet(_deep_sampling_now, payload, dir, l3, l4); -} - -void NetworkMetricsManager::process_dnstap(const dnstap::Dnstap &payload, size_t size) -{ - // dnstap message type - auto mtype = payload.message().type(); - // set proper timestamp. 
use dnstap version if available, otherwise "now" - timespec stamp; - switch (mtype) { - case dnstap::Message_Type_CLIENT_RESPONSE: - case dnstap::Message_Type_AUTH_RESPONSE: - case dnstap::Message_Type_RESOLVER_RESPONSE: - if (payload.message().has_response_time_sec()) { - stamp.tv_sec = payload.message().response_time_sec(); - stamp.tv_nsec = payload.message().response_time_nsec(); - } - break; - case dnstap::Message_Type_CLIENT_QUERY: - case dnstap::Message_Type_AUTH_QUERY: - case dnstap::Message_Type_RESOLVER_QUERY: - if (payload.message().has_query_time_sec()) { - stamp.tv_sec = payload.message().query_time_sec(); - stamp.tv_nsec = payload.message().query_time_nsec(); - } - break; - default: - // use now() - std::timespec_get(&stamp, TIME_UTC); - } - // base event - new_event(stamp); - // process in the "live" bucket. this will parse the resources if we are deep sampling - live_bucket()->process_dnstap(_deep_sampling_now, payload, size); -} - -} diff --git a/src/handlers/net/v2/NetStreamHandler.h b/src/handlers/net/v2/NetStreamHandler.h deleted file mode 100644 index 2d8cf556a..000000000 --- a/src/handlers/net/v2/NetStreamHandler.h +++ /dev/null @@ -1,302 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ - -#pragma once - -#include "AbstractMetricsManager.h" -#include "Corrade/Utility/Debug.h" -#include "DnstapInputStream.h" -#include "GeoDB.h" -#include "MockInputStream.h" -#include "PcapInputStream.h" -#include "StreamHandler.h" -#include - -namespace visor::handler::net::v2 { - -using namespace visor::input::pcap; -using namespace visor::input::dnstap; -using namespace visor::input::mock; - -static constexpr const char *NET_SCHEMA{"net"}; - -namespace group { -enum NetMetrics : visor::MetricGroupIntType { - Counters, - Cardinality, - Quantiles, - TopGeo, - TopIps -}; -} - -enum NetworkPacketDirection { - in, - out, - unknown -}; - -struct NetworkPacket { - NetworkPacketDirection dir; - pcpp::ProtocolType l3; - pcpp::ProtocolType l4; - size_t payload_size; - bool syn_flag; - pcpp::IPv4Address ipv4_src; - pcpp::IPv4Address ipv4_dst; - pcpp::IPv6Address ipv6_src; - pcpp::IPv6Address ipv6_dst; - - NetworkPacket(NetworkPacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, size_t payload_size, bool syn_flag) - : dir(dir) - , l3(l3) - , l4(l4) - , payload_size(payload_size) - , syn_flag(syn_flag) - { - } -}; - -struct NetworkDirection { - // total numPackets is tracked in base class num_events - struct Counters { - Counter UDP; - Counter TCP; - Counter OtherL4; - Counter IPv4; - Counter IPv6; - Counter TCP_SYN; - Counter total; - Counters() - : UDP(NET_SCHEMA, {"udp_packets"}, "Count of UDP packets") - , TCP(NET_SCHEMA, {"tcp_packets"}, "Count of TCP packets") - , OtherL4(NET_SCHEMA, {"other_l4_packets"}, "Count of packets which are not UDP or TCP") - , IPv4(NET_SCHEMA, {"ipv4_packets"}, "Count of IPv4 packets") - , IPv6(NET_SCHEMA, {"ipv6_packets"}, "Count of IPv6 packets") - , TCP_SYN(NET_SCHEMA, {"tcp", "syn_packets"}, "Count of TCP SYN packets") - , total(NET_SCHEMA, {"total_packets"}, "Count of total packets matching the configured filter(s)") - { - } - void operator+=(const Counters &other) - { - UDP += other.UDP; - TCP += other.TCP; - OtherL4 += 
other.OtherL4; - IPv4 += other.IPv4; - IPv6 += other.IPv6; - TCP_SYN += other.TCP_SYN; - total += other.total; - } - - void to_json(json &j) const - { - UDP.to_json(j); - TCP.to_json(j); - OtherL4.to_json(j); - IPv4.to_json(j); - IPv6.to_json(j); - TCP_SYN.to_json(j); - total.to_json(j); - } - - void to_prometheus(std::stringstream &out, const Metric::LabelMap &add_labels) const - { - UDP.to_prometheus(out, add_labels); - TCP.to_prometheus(out, add_labels); - OtherL4.to_prometheus(out, add_labels); - IPv4.to_prometheus(out, add_labels); - IPv6.to_prometheus(out, add_labels); - TCP_SYN.to_prometheus(out, add_labels); - total.to_prometheus(out, add_labels); - } - - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start, timespec &end, Metric::LabelMap add_labels) const - { - UDP.to_opentelemetry(scope, start, end, add_labels); - TCP.to_opentelemetry(scope, start, end, add_labels); - OtherL4.to_opentelemetry(scope, start, end, add_labels); - IPv4.to_opentelemetry(scope, start, end, add_labels); - IPv6.to_opentelemetry(scope, start, end, add_labels); - TCP_SYN.to_opentelemetry(scope, start, end, add_labels); - total.to_opentelemetry(scope, start, end, add_labels); - } - }; - Counters counters; - - Cardinality ipCard; - TopN topGeoLoc; - TopN topASN; - TopN topIPv4; - TopN topIPv6; - Quantile payload_size; - Rate rate; - Rate throughput; - - NetworkDirection() - : counters() - , ipCard(NET_SCHEMA, {"cardinality", "ips"}, "IP cardinality") - , topGeoLoc(NET_SCHEMA, "geo_loc", {"top_geo_loc_packets"}, "Top GeoIP locations") - , topASN(NET_SCHEMA, "asn", {"top_asn_packets"}, "Top ASNs by IP") - , topIPv4(NET_SCHEMA, "ipv4", {"top_ipv4_packets"}, "Top IPv4 addresses") - , topIPv6(NET_SCHEMA, "ipv6", {"top_ipv6_packets"}, "Top IPv6 addresses") - , payload_size(NET_SCHEMA, {"payload_size_bytes"}, "Quantiles of payload sizes, in bytes") - , rate(NET_SCHEMA, {"rates", "pps"}, "Rate of packets per second") - , throughput(NET_SCHEMA, {"rates", "bps"}, "Data rate 
of bits per second") - { - } - - void update_topn_metrics(size_t topn_count, uint64_t percentile_threshold) - { - topGeoLoc.set_settings(topn_count, percentile_threshold); - topASN.set_settings(topn_count, percentile_threshold); - topIPv4.set_settings(topn_count, percentile_threshold); - topIPv6.set_settings(topn_count, percentile_threshold); - } -}; - -class NetworkMetricsBucket final : public visor::AbstractMetricsBucket -{ - -protected: - mutable std::shared_mutex _mutex; - size_t _topn_count{10}; - uint64_t _topn_percentile_threshold{0}; - inline static const std::unordered_map _dir_str = { - {NetworkPacketDirection::in, "in"}, - {NetworkPacketDirection::out, "out"}, - {NetworkPacketDirection::unknown, "unknown"}}; - Counter _filtered; - std::map _net; - - void _process_geo_metrics(NetworkDirection &net, const pcpp::IPv4Address &ipv4); - void _process_geo_metrics(NetworkDirection &net, const pcpp::IPv6Address &ipv6); - -public: - NetworkMetricsBucket() - : _filtered(NET_SCHEMA, {"filtered_packets"}, "Total packets seen that did not match the configured filter(s) (if any)") - { - set_event_rate_info(NET_SCHEMA, {"rates", "observed_pps"}, "Rate of all packets before filtering per second"); - set_num_events_info(NET_SCHEMA, {"observed_packets"}, "Total packets events generated"); - set_num_sample_info(NET_SCHEMA, {"deep_sampled_packets"}, "Total packets that were sampled for deep inspection"); - } - - // get a copy of the counters - NetworkDirection::Counters counters(NetworkPacketDirection dir) const - { - std::shared_lock lock(_mutex); - return _net.at(dir).counters; - } - - // visor::AbstractMetricsBucket - void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; - void to_json(json &j) const override; - void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels 
= {}) const override; - void update_topn_metrics(size_t topn_count, uint64_t percentile_threshold) override - { - _topn_count = topn_count; - _topn_percentile_threshold = percentile_threshold; - } - - // must be thread safe as it is called from time window maintenance thread - void on_set_read_only() override - { - // stop rate collection - for (auto &net : _net) { - net.second.rate.cancel(); - net.second.throughput.cancel(); - } - } - - void process_filtered(); - void process_packet(bool deep, pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4); - void process_dnstap(bool deep, const dnstap::Dnstap &payload, size_t size); - void process_net_layer(NetworkPacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, size_t payload_size); - void process_net_layer(NetworkPacket &packet); -}; - -class NetworkMetricsManager final : public visor::AbstractMetricsManager -{ -public: - NetworkMetricsManager(const Configurable *window_config) - : visor::AbstractMetricsManager(window_config) - { - } - - void process_filtered(timespec stamp); - void process_packet(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, timespec stamp); - void process_dnstap(const dnstap::Dnstap &payload, size_t size); -}; - -class NetStreamHandler final : public visor::StreamMetricsHandler -{ - - // the input event proxy we support (only one will be in use at a time) - PcapInputEventProxy *_pcap_proxy{nullptr}; - DnstapInputEventProxy *_dnstap_proxy{nullptr}; - MockInputEventProxy *_mock_proxy{nullptr}; - - sigslot::connection _dnstap_connection; - - sigslot::connection _pkt_connection; - sigslot::connection _pkt_tcp_reassembled_connection; - sigslot::connection _start_tstamp_connection; - sigslot::connection _end_tstamp_connection; - - sigslot::connection _tcp_start_connection; - sigslot::connection _tcp_end_connection; - sigslot::connection _tcp_message_connection; - - sigslot::connection _heartbeat_connection; - - 
static const inline StreamMetricsHandler::ConfigsDefType _config_defs = { - "geoloc_notfound", - "asn_notfound", - "only_geoloc_prefix", - "only_asn_number", - "recorded_stream"}; - - static const inline StreamMetricsHandler::GroupDefType _group_defs = { - {"cardinality", group::NetMetrics::Cardinality}, - {"counters", group::NetMetrics::Counters}, - {"quantiles", group::NetMetrics::Quantiles}, - {"top_geo", group::NetMetrics::TopGeo}, - {"top_ips", group::NetMetrics::TopIps}}; - - void process_dnstap_cb(const dnstap::Dnstap &, size_t); - void process_packet_cb(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, pcpp::ProtocolType l4, timespec stamp); - void process_tcp_reassembled_packet_cb(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, uint32_t flowkey, timespec stamp); - void set_start_tstamp(timespec stamp); - void set_end_tstamp(timespec stamp); - bool validate_tcp_data(const pcpp::ConnectionData &connectionData, PacketDirection dir, timeval timeInterval); - - // Net Filters - enum Filters { - GeoLocNotFound, - AsnNotFound, - GeoLocPrefix, - AsnNumber, - FiltersMAX - }; - - std::bitset _f_enabled; - std::vector _f_geoloc_prefix; - std::vector _f_asn_number; - bool _filtering(pcpp::Packet &payload, PacketDirection dir, timespec stamp); - -public: - NetStreamHandler(const std::string &name, InputEventProxy *proxy, const Configurable *window_config); - ~NetStreamHandler() override; - - // visor::AbstractModule - std::string schema_key() const override - { - return NET_SCHEMA; - } - - void start() override; - void stop() override; -}; - -} diff --git a/src/handlers/net/v2/tests/CMakeLists.txt b/src/handlers/net/v2/tests/CMakeLists.txt deleted file mode 100644 index 35204889b..000000000 --- a/src/handlers/net/v2/tests/CMakeLists.txt +++ /dev/null @@ -1,24 +0,0 @@ - -## TEST SUITE -if(WIN32) - #dnstap not supported - add_executable(unit-tests-handler-net-v2 - main.cpp - ) -else() - add_executable(unit-tests-handler-net-v2 - 
main.cpp - test_net_layer.cpp - ) -endif() - -target_link_libraries(unit-tests-handler-net-v2 - PRIVATE - ${CONAN_LIBS_JSON-SCHEMA-VALIDATOR} - Visor::Handler::Net::V2 - Visor::Handler::Dns::V2) - -add_test(NAME unit-tests-handler-net-v2 - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/src - COMMAND unit-tests-handler-net-v2 - ) diff --git a/src/handlers/net/v2/tests/test_json_schema.cpp b/src/handlers/net/v2/tests/test_json_schema.cpp deleted file mode 100644 index aa3c14d05..000000000 --- a/src/handlers/net/v2/tests/test_json_schema.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ - -#include "catch2/catch.hpp" -#include "nlohmann/json-schema.hpp" -#include -#include -#include - -#include "PcapInputStream.h" -#include "NetStreamHandler.h" - -using namespace visor::handler::net::v2; -using namespace visor::input::pcap; -using namespace nlohmann; -using nlohmann::json_schema::json_validator; - -TEST_CASE("Net JSON Schema", "[net][iface][json]") -{ - - SECTION("json iface") - { - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_tcp_random.pcap"); - stream.config_set("bpf", ""); - stream.config_set("host_spec", "192.168.0.0/24"); - stream.parse_host_spec(); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - net_handler.config_set("recorded_stream", true); - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - json net_json; - net_handler.metrics()->window_merged_json(net_json, net_handler.schema_key(), 5); - std::ifstream sfile("handlers/net/v2/tests/window-schema.json"); - CHECK(sfile.is_open()); - std::string schema; - - sfile.seekg(0, std::ios::end); - schema.reserve(sfile.tellg()); - sfile.seekg(0, std::ios::beg); - - 
schema.assign((std::istreambuf_iterator(sfile)), std::istreambuf_iterator()); - json_validator validator; - - auto schema_json = json::parse(schema); - - try { - validator.set_root_schema(schema_json); - validator.validate(net_json); - } catch (const std::exception &e) { - FAIL(e.what()); - } - } -} diff --git a/src/handlers/net/v2/tests/test_net_layer.cpp b/src/handlers/net/v2/tests/test_net_layer.cpp deleted file mode 100644 index de015f048..000000000 --- a/src/handlers/net/v2/tests/test_net_layer.cpp +++ /dev/null @@ -1,569 +0,0 @@ -#include "catch2/catch.hpp" - -#include "DnsStreamHandler.h" -#include "DnstapInputStream.h" -#include "GeoDB.h" -#include "PcapInputStream.h" -#include "NetStreamHandler.h" - -using namespace visor::handler::net::v2; -using namespace visor::handler::dns::v2; -using namespace visor::input::pcap; - -TEST_CASE("Parse net (dns) UDP IPv4 tests", "[pcap][ipv4][udp][net]") -{ - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_ipv4_udp.pcap"); - stream.config_set("bpf", std::string()); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - - net_handler.start(); - stream.start(); - net_handler.stop(); - stream.stop(); - - auto counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::unknown); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(net_handler.metrics()->current_periods() == 1); - CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1567706414); - CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 599964000); - - CHECK(net_handler.metrics()->end_tstamp().tv_sec == 1567706420); - CHECK(net_handler.metrics()->end_tstamp().tv_nsec == 602866000); - - CHECK(net_handler.metrics()->bucket(0)->period_length() == 6); - - CHECK(event_data.num_events->value() == 140); - CHECK(counters.UDP.value() == 140); - 
CHECK(counters.IPv4.value() == 140); - CHECK(counters.IPv6.value() == 0); -} - -TEST_CASE("Parse net (dns) TCP IPv4 tests", "[pcap][ipv4][tcp][net]") -{ - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_ipv4_tcp.pcap"); - stream.config_set("bpf", ""); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - - net_handler.start(); - stream.start(); - net_handler.stop(); - stream.stop(); - - auto counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::unknown); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1567706433); - CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 56403000); - CHECK(event_data.num_events->value() == 2100); - CHECK(counters.TCP.value() == 2100); - CHECK(counters.TCP_SYN.value() == 420); - CHECK(counters.IPv4.value() == 2100); - CHECK(counters.IPv6.value() == 0); -} - -TEST_CASE("Parse net (dns) UDP IPv6 tests", "[pcap][ipv6][udp][net]") -{ - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_ipv6_udp.pcap"); - stream.config_set("bpf", ""); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - auto counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::unknown); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1567706365); - CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 513271000); - CHECK(event_data.num_events->value() == 140); - CHECK(counters.UDP.value() == 140); - CHECK(counters.IPv4.value() == 0); - CHECK(counters.IPv6.value() == 
140); -} - -TEST_CASE("Parse net (dns) TCP IPv6 tests", "[pcap][ipv6][tcp][net]") -{ - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_ipv6_tcp.pcap"); - stream.config_set("bpf", ""); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - auto counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::unknown); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1567706308); - CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 958184000); - CHECK(event_data.num_events->value() == 1800); - CHECK(counters.TCP.value() == 1800); - CHECK(counters.TCP_SYN.value() == 360); - CHECK(counters.IPv4.value() == 0); - CHECK(counters.IPv6.value() == 1800); -} - -TEST_CASE("Parse net (dns) random UDP/TCP tests", "[pcap][net]") -{ - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_tcp_random.pcap"); - stream.config_set("bpf", ""); - stream.config_set("host_spec", "192.168.0.0/24"); - stream.parse_host_spec(); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - auto counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::in); - counters += net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::out); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1614874231); - CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 565771000); - - // confirmed with wireshark - 
CHECK(event_data.num_events->value() == 16147); - CHECK(event_data.num_samples->value() == 16147); - CHECK(counters.TCP.value() == 13176); - CHECK(counters.TCP_SYN.value() == 2846); - CHECK(counters.UDP.value() == 2971); - CHECK(counters.IPv4.value() == 16147); - CHECK(counters.IPv6.value() == 0); - CHECK(counters.OtherL4.value() == 0); - CHECK(counters.total.value() == 16147); - - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - - CHECK(j["out"]["cardinality"]["ips"] == 1); - CHECK(j["out"]["cardinality"]["ips"] == 1); - CHECK(j["out"]["top_ipv4_packets"][0]["estimate"] == 9499); - CHECK(j["out"]["top_ipv4_packets"][0]["name"] == "8.8.8.8"); - CHECK(j["in"]["payload_size_bytes"]["p50"] >= 66); -} - -TEST_CASE("Parse net (dns) with DNS filter only_qname_suffix", "[pcap][dns][net]") -{ - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_mixed_rcode.pcap"); - stream.config_set("bpf", ""); - stream.config_set("host_spec", "192.168.0.0/24"); - stream.parse_host_spec(); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - DnsStreamHandler dns_handler{"dns-test", stream_proxy, &c}; - dns_handler.set_event_proxy(stream.create_event_proxy(c)); - NetStreamHandler net_handler{"net-test", dns_handler.get_event_proxy(), &c}; - - dns_handler.config_set("only_qname_suffix", {"google.com"}); - - net_handler.start(); - dns_handler.start(); - stream.start(); - stream.stop(); - dns_handler.stop(); - net_handler.stop(); - - auto dns_counters = dns_handler.metrics()->bucket(0)->counters(TransactionDirection::out); - CHECK(dns_counters.UDP.value() == 4); - CHECK(dns_counters.IPv4.value() == 4); - - auto net_counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::in); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(event_data.num_events->value() == 17); - CHECK(net_counters.TCP.value() == 0); - 
CHECK(net_counters.UDP.value() == 13); - CHECK(net_counters.IPv4.value() == 13); - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - - CHECK(j["in"]["cardinality"]["ips"] == 8); - CHECK(j["in"]["top_ipv4_packets"][0]["estimate"] == 3); - CHECK(j["in"]["top_ipv4_packets"][0]["name"] == "192.168.0.54"); -} - -TEST_CASE("Parse DNS with NET filter geo", "[pcap][dns][net]") -{ - CHECK_NOTHROW(visor::geo::GeoIP().enable("tests/fixtures/GeoIP2-City-Test.mmdb")); - CHECK_NOTHROW(visor::geo::GeoASN().enable("tests/fixtures/GeoIP2-ISP-Test.mmdb")); - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_mixed_rcode.pcap"); - stream.config_set("bpf", ""); - stream.config_set("host_spec", "192.168.0.0/24"); - stream.parse_host_spec(); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - net_handler.set_event_proxy(stream.create_event_proxy(c)); - DnsStreamHandler dns_handler{"dns-test", net_handler.get_event_proxy(), &c}; - - net_handler.config_set("geoloc_notfound", true); - - dns_handler.start(); - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - dns_handler.stop(); - - auto net_counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::in); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(event_data.num_events->value() == 24); - CHECK(net_counters.TCP.value() == 0); - CHECK(net_counters.UDP.value() == 15); - CHECK(net_counters.IPv4.value() == 15); - - auto dns_counters = dns_handler.metrics()->bucket(0)->counters(TransactionDirection::out); - CHECK(dns_counters.UDP.value() == 9); - CHECK(dns_counters.IPv4.value() == 9); -} - -TEST_CASE("Parse DNS TCP data with NET filter geo", "[pcap][dns][net]") -{ - CHECK_NOTHROW(visor::geo::GeoIP().enable("tests/fixtures/GeoIP2-City-Test.mmdb")); - 
CHECK_NOTHROW(visor::geo::GeoASN().enable("tests/fixtures/GeoIP2-ISP-Test.mmdb")); - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_ipv4_tcp.pcap"); - stream.config_set("bpf", ""); - stream.parse_host_spec(); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - net_handler.set_event_proxy(stream.create_event_proxy(c)); - DnsStreamHandler dns_handler{"dns-test", net_handler.get_event_proxy(), &c}; - dns_handler.set_event_proxy(stream.create_event_proxy(c)); - NetStreamHandler net_handler_2{"net-test-2", dns_handler.get_event_proxy(), &c}; - net_handler_2.set_event_proxy(stream.create_event_proxy(c)); - DnsStreamHandler dns_handler_2{"dns-test-2", net_handler_2.get_event_proxy(), &c}; - - dns_handler_2.start(); - net_handler_2.start(); - dns_handler.start(); - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - dns_handler.stop(); - net_handler_2.stop(); - dns_handler_2.stop(); - - auto net_counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::unknown); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(event_data.num_events->value() == 2100); - CHECK(net_counters.TCP.value() == 2100); - CHECK(net_counters.IPv4.value() == 2100); - - auto dns_counters = dns_handler.metrics()->bucket(0)->counters(TransactionDirection::unknown); - CHECK(dns_counters.TCP.value() == 210); - CHECK(dns_counters.IPv4.value() == 210); - - auto net_counters_2 = net_handler_2.metrics()->bucket(0)->counters(NetworkPacketDirection::unknown); - CHECK(net_counters_2.TCP.value() == 420); - CHECK(net_counters_2.IPv4.value() == 420); - - auto dns_counters_2 = dns_handler_2.metrics()->bucket(0)->counters(TransactionDirection::unknown); - CHECK(dns_counters_2.TCP.value() == 210); - CHECK(dns_counters_2.IPv4.value() == 210); -} - -TEST_CASE("Parse net dnstap 
stream", "[dnstap][net][!mayfail]") -{ - - DnstapInputStream stream{"dnstap-test"}; - stream.config_set("dnstap_file", "inputs/dnstap/tests/fixtures/fixture.dnstap"); - stream.config_set("only_hosts", {"192.168.0.0/24", "2001:db8::/48"}); - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"dns-test", stream_proxy, &c}; - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - auto counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::in); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - // confirmed with wireshark - CHECK(event_data.num_events->value() == 153); - CHECK(event_data.num_samples->value() == 153); - CHECK(counters.TCP.value() == 0); - CHECK(counters.TCP_SYN.value() == 0); - CHECK(counters.UDP.value() == 79); - CHECK(counters.IPv4.value() == 79); - CHECK(counters.IPv6.value() == 0); - CHECK(counters.total.value() == 79); - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - - CHECK(j["in"]["cardinality"]["ips"] == 2); - CHECK(j["in"]["top_ipv4_packets"][0]["estimate"] == 79); - CHECK(j["in"]["top_ipv4_packets"][0]["name"] == "192.168.0.54"); - CHECK(j["in"]["payload_size_bytes"]["p50"] == 89); -} - -TEST_CASE("Net groups", "[pcap][net]") -{ - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_tcp_random.pcap"); - stream.config_set("bpf", ""); - stream.config_set("host_spec", "192.168.0.0/24"); - stream.parse_host_spec(); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - - SECTION("disable cardinality and counters") - { - net_handler.config_set("disable", {"cardinality", "counters"}); - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - auto counters = 
net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::in); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1614874231); - CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 565771000); - - CHECK(event_data.num_events->value() == 16147); - CHECK(event_data.num_samples->value() == 16147); - CHECK(counters.TCP.value() == 0); - CHECK(counters.TCP_SYN.value() == 0); - CHECK(counters.UDP.value() == 0); - CHECK(counters.IPv4.value() == 0); - CHECK(counters.IPv6.value() == 0); - CHECK(counters.OtherL4.value() == 0); - CHECK(counters.total.value() == 0); - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - - CHECK(j["in"]["cardinality"]["dst_ips_out"] == nullptr); - CHECK(j["in"]["cardinality"]["src_ips_in"] == nullptr); - CHECK(j["in"]["top_ipv4_packets"][0]["estimate"] == 6648); - CHECK(j["in"]["top_ipv4_packets"][0]["name"] == "8.8.8.8"); - } - - SECTION("disable Top ips and Top geo") - { - net_handler.config_set("disable", {"top_ips", "top_geo"}); - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - auto counters = net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::in); - counters += net_handler.metrics()->bucket(0)->counters(NetworkPacketDirection::out); - auto event_data = net_handler.metrics()->bucket(0)->event_data_locked(); - - CHECK(net_handler.metrics()->start_tstamp().tv_sec == 1614874231); - CHECK(net_handler.metrics()->start_tstamp().tv_nsec == 565771000); - - // confirmed with wireshark - CHECK(event_data.num_events->value() == 16147); - CHECK(event_data.num_samples->value() == 16147); - CHECK(counters.TCP.value() == 13176); - CHECK(counters.UDP.value() == 2971); - CHECK(counters.IPv4.value() == 16147); - CHECK(counters.IPv6.value() == 0); - CHECK(counters.OtherL4.value() == 0); - CHECK(counters.total.value() == 16147); - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - - 
CHECK(j["in"]["cardinality"]["ips"] == 1); - CHECK(j["in"]["top_ipv4"][0]["estimate"] == nullptr); - CHECK(j["in"]["top_ipv4"][0]["name"] == nullptr); - } - - SECTION("disable invalid dns group") - { - net_handler.config_set("disable", {"top_ips", "rates"}); - REQUIRE_THROWS_WITH(net_handler.start(), "rates is an invalid/unsupported metric group. The valid groups are: all, cardinality, counters, quantiles, top_geo, top_ips"); - } - - SECTION("enable invalid dns group") - { - net_handler.config_set("enable", {"top_ips", "rates"}); - REQUIRE_THROWS_WITH(net_handler.start(), "rates is an invalid/unsupported metric group. The valid groups are: all, cardinality, counters, quantiles, top_geo, top_ips"); - } -} - -TEST_CASE("Net geolocation filtering", "[pcap][net][geo]") -{ - CHECK_NOTHROW(visor::geo::GeoIP().enable("tests/fixtures/GeoIP2-City-Test.mmdb")); - CHECK_NOTHROW(visor::geo::GeoASN().enable("tests/fixtures/GeoIP2-ISP-Test.mmdb")); - - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_mixed_rcode.pcap"); - stream.config_set("bpf", ""); - stream.config_set("host_spec", "192.168.0.0/24"); - stream.parse_host_spec(); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - - SECTION("Enable geoloc not found") - { - net_handler.config_set("geoloc_notfound", true); - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - CHECK(j["in"]["top_ipv4_packets"][0]["estimate"] == 3); - CHECK(j["in"]["top_ipv4_packets"][0]["name"] == "192.168.0.54"); - CHECK(j["in"]["top_geo_loc_packets"][0]["estimate"] == 15); - CHECK(j["in"]["top_geo_loc_packets"][0]["name"] == "Unknown"); - } - - SECTION("Enable asn not found") - { - net_handler.config_set("asn_notfound", true); - - net_handler.start(); - stream.start(); - stream.stop(); - 
net_handler.stop(); - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - CHECK(j["in"]["top_ipv4_packets"][0]["estimate"] == 3); - CHECK(j["in"]["top_ipv4_packets"][0]["name"] == "192.168.0.54"); - CHECK(j["in"]["top_asn_packets"][0]["estimate"] == 15); - CHECK(j["in"]["top_asn_packets"][0]["name"] == "Unknown"); - } - - SECTION("Enable geoloc and asn not found") - { - net_handler.config_set("geoloc_notfound", true); - net_handler.config_set("asn_notfound", true); - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - CHECK(j["in"]["top_ipv4_packets"][0]["estimate"] == 3); - CHECK(j["in"]["top_ipv4_packets"][0]["name"] == "192.168.0.54"); - CHECK(j["in"]["top_geo_loc_packets"][0]["estimate"] == 15); - CHECK(j["in"]["top_geo_loc_packets"][0]["name"] == "Unknown"); - CHECK(j["in"]["top_asn_packets"][0]["estimate"] == 15); - CHECK(j["in"]["top_asn_packets"][0]["name"] == "Unknown"); - } - - SECTION("Enable geoloc prefix") - { - net_handler.config_set("only_geoloc_prefix", {"NA/United States"}); - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - CHECK(j["filtered_packets"] == 24); - CHECK(j["top_geo_loc_packets"][0]["name"] == nullptr); - } - - SECTION("Enable asn number") - { - net_handler.config_set("only_asn_number", {"16509", "22131"}); - - net_handler.start(); - stream.start(); - stream.stop(); - net_handler.stop(); - - nlohmann::json j; - net_handler.metrics()->bucket(0)->to_json(j); - CHECK(j["filtered_packets"] == 24); - CHECK(j["top_asn_packets"][0]["name"] == nullptr); - } - - SECTION("Invalid asn number") - { - net_handler.config_set("only_asn_number", {"16509/Amazon"}); - REQUIRE_THROWS_WITH(net_handler.start(), "NetStreamHandler: only_asn_number filter contained an invalid/unsupported value: 16509/Amazon"); - } -} - -TEST_CASE("Net invalid 
config", "[net][filter][config]") -{ - PcapInputStream stream{"pcap-test"}; - stream.config_set("pcap_file", "tests/fixtures/dns_udp_mixed_rcode.pcap"); - - visor::Config c; - auto stream_proxy = stream.add_event_proxy(c); - c.config_set("num_periods", 1); - NetStreamHandler net_handler{"net-test", stream_proxy, &c}; - net_handler.config_set("invalid_config", true); - REQUIRE_THROWS_WITH(net_handler.start(), "invalid_config is an invalid/unsupported config or filter. The valid configs/filters are: geoloc_notfound, asn_notfound, only_geoloc_prefix, only_asn_number, recorded_stream, deep_sample_rate, num_periods, topn_count, topn_percentile_threshold"); -} diff --git a/src/handlers/net/v2/tests/window-schema.json b/src/handlers/net/v2/tests/window-schema.json deleted file mode 100644 index abc7eb213..000000000 --- a/src/handlers/net/v2/tests/window-schema.json +++ /dev/null @@ -1,464 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema", - "$id": "http://example.com/example.json", - "type": "object", - "title": "The root schema", - "description": "The root schema comprises the entire JSON document.", - "default": {}, - "examples": [ - { - "net": { - "cardinality": { - "dst_ips": 1, - "src_ips": 1 - }, - "deep_sampled_packets": 16147, - "observed_packets": 16147, - "in_packets": 6648, - "ipv4_packets": 16147, - "ipv6_packets": 0, - "other_l4_packets": 0, - "out_packets": 9499, - "period": { - "length": 31, - "start_ts": 1614874231 - }, - "tcp_packets": 13176, - "tcp": { - "syn_packets": 2846 - }, - "top_asn_packets": [], - "top_geo_loc_packets": [], - "top_ipv4_packets": [ - { - "estimate": 16147, - "name": "8.8.8.8" - } - ], - "top_ipv6_packets": [], - "total_packets": 16147, - "udp_packets": 2971, - "unknown_dir_packets": 0 - } - } - ], - "required": [ - "packets" - ], - "properties": { - "packets": { - "$id": "#/properties/packets", - "type": "object", - "title": "The packets schema", - "description": "An explanation about the purpose of this instance.", 
- "default": {}, - "examples": [ - { - "cardinality": { - "dst_ips": 1, - "src_ips": 1 - }, - "deep_sampled_packets": 16147, - "observed_packets": 16147, - "in_packets": 6648, - "ipv4_packets": 16147, - "ipv6_packets": 0, - "other_l4_packets": 0, - "out_packets": 9499, - "filtered_packets": 0, - "period": { - "length": 31, - "start_ts": 1614874231 - }, - "payload_size_bytes": { - "p50": 74, - "p90": 176, - "p95": 187, - "p99": 202 - }, - "tcp_packets": 13176, - "tcp": { - "syn": 2846 - }, - "top_asn_packets": [], - "top_geo_loc_packets": [], - "top_ipv4_packets": [ - { - "estimate": 16147, - "name": "8.8.8.8" - } - ], - "top_ipv6_packets": [], - "total_packets": 16147, - "udp_packets": 2971, - "unknown_dir_packets": 0 - } - ], - "required": [ - "cardinality", - "deep_sampled_packets", - "observed_packets", - "in_packets", - "ipv4_packets", - "ipv6_packets", - "other_l4_packets", - "out_packets", - "filtered_packets", - "period", - "payload_size_bytes", - "tcp_packets", - "top_asn_packets", - "top_geo_loc_packets", - "top_ipv4_packets", - "top_ipv6_packets", - "total_packets", - "udp_packets", - "unknown_dir_packets" - ], - "properties": { - "cardinality": { - "$id": "#/properties/packets/properties/cardinality", - "type": "object", - "title": "The cardinality schema", - "description": "An explanation about the purpose of this instance.", - "default": {}, - "examples": [ - { - "dst_ips": 1, - "src_ips": 1 - } - ], - "required": [ - "dst_ips", - "src_ips" - ], - "properties": { - "dst_ips": { - "$id": "#/properties/packets/properties/cardinality/properties/dst_ips", - "type": "integer", - "title": "The dst_ips schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 1 - ] - }, - "src_ips": { - "$id": "#/properties/packets/properties/cardinality/properties/src_ips", - "type": "integer", - "title": "The src_ips schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - 
"examples": [ - 1 - ] - } - }, - "additionalProperties": false - }, - "deep_sampled_packets": { - "$id": "#/properties/packets/properties/deep_sampled_packets", - "type": "integer", - "title": "The deep_sampled_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 16147 - ] - }, - "observed_packets": { - "$id": "#/properties/packets/properties/observed_packets", - "type": "integer", - "title": "The observed_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 16147 - ] - }, - "in_packets": { - "$id": "#/properties/packets/properties/in_packets", - "type": "integer", - "title": "The in_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 6648 - ] - }, - "ipv4_packets": { - "$id": "#/properties/packets/properties/ipv4_packets", - "type": "integer", - "title": "The ipv4_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 16147 - ] - }, - "ipv6_packets": { - "$id": "#/properties/packets/properties/ipv6_packets", - "type": "integer", - "title": "The ipv6_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 0 - ] - }, - "other_l4_packets": { - "$id": "#/properties/packets/properties/other_l4_packets", - "type": "integer", - "title": "The other_l4_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 0 - ] - }, - "out_packets": { - "$id": "#/properties/packets/properties/out_packets", - "type": "integer", - "title": "The out_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 9499 - ] - }, - "filtered_packets": { - "$id": "#/properties/packets/properties/filtered_packets", - "type": "integer", - 
"title": "The filtered_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 2 - ] - }, - "period": { - "$id": "#/properties/packets/properties/period", - "type": "object", - "title": "The period schema", - "description": "An explanation about the purpose of this instance.", - "default": {}, - "examples": [ - { - "length": 31, - "start_ts": 1614874231 - } - ], - "required": [ - "length", - "start_ts" - ], - "properties": { - "length": { - "$id": "#/properties/packets/properties/period/properties/length", - "type": "integer", - "title": "The length schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 31 - ] - }, - "start_ts": { - "$id": "#/properties/packets/properties/period/properties/start_ts", - "type": "integer", - "title": "The start_ts schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 1614874231 - ] - } - }, - "additionalProperties": false - }, - "payload_size_bytes": { - "$id": "#/properties/packets/properties/payload_size_bytes", - "type": "object", - "title": "The payload_size_bytes schema", - "description": "An explanation about the purpose of this instance.", - "default": {}, - "examples": [ - {} - ] - }, - "tcp_packets": { - "$id": "#/properties/packets/properties/tcp_packets", - "type": "integer", - "title": "The tcp_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 13176 - ] - }, - "tcp": { - "$id": "#/properties/packets/properties/tcp", - "type": "object", - "title": "The tcp schema", - "description": "An explanation about the purpose of this instance.", - "default": {}, - "examples": [ - { - "syn_packets": 10 - } - ], - "required": [ - "syn_packets" - ], - "properties": { - "syn_packets": { - "$id": "#/properties/packets/properties/tcp/properties/syn_packets", - "type": "object", - 
"title": "The syn_packets flag schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 10 - ] - } - } - }, - "top_asn_packets": { - "$id": "#/properties/packets/properties/top_asn_packets", - "type": "array", - "title": "The top_asn_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": [], - "examples": [ - [] - ], - "additionalItems": true, - "items": { - "$id": "#/properties/packets/properties/top_asn_packets/items" - } - }, - "top_geo_loc_packets": { - "$id": "#/properties/packets/properties/top_geo_loc_packets", - "type": "array", - "title": "The top_geo_loc_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": [], - "examples": [ - [] - ], - "additionalItems": true, - "items": { - "$id": "#/properties/packets/properties/top_geo_loc_packets/items" - } - }, - "top_ipv4_packets": { - "$id": "#/properties/packets/properties/top_ipv4_packets", - "type": "array", - "title": "The top_ipv4_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": [], - "examples": [ - [ - { - "estimate": 16147, - "name": "8.8.8.8" - } - ] - ], - "additionalItems": true, - "items": { - "$id": "#/properties/packets/properties/top_ipv4_packets/items", - "anyOf": [ - { - "$id": "#/properties/packets/properties/top_ipv4_packets/items/anyOf/0", - "type": "object", - "title": "The first anyOf schema", - "description": "An explanation about the purpose of this instance.", - "default": {}, - "examples": [ - { - "estimate": 16147, - "name": "8.8.8.8" - } - ], - "required": [ - "estimate", - "name" - ], - "properties": { - "estimate": { - "$id": "#/properties/packets/properties/top_ipv4_packets/items/anyOf/0/properties/estimate", - "type": "integer", - "title": "The estimate schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 16147 - ] - }, - 
"name": { - "$id": "#/properties/packets/properties/top_ipv4_packets/items/anyOf/0/properties/name", - "type": "string", - "title": "The name schema", - "description": "An explanation about the purpose of this instance.", - "default": "", - "examples": [ - "8.8.8.8" - ] - } - }, - "additionalProperties": false - } - ] - } - }, - "top_ipv6_packets": { - "$id": "#/properties/packets/properties/top_ipv6_packets", - "type": "array", - "title": "The top_ipv6_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": [], - "examples": [ - [] - ], - "additionalItems": true, - "items": { - "$id": "#/properties/packets/properties/top_ipv6_packets/items" - } - }, - "total_packets": { - "$id": "#/properties/packets/properties/total_packets", - "type": "integer", - "title": "The total_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 16147 - ] - }, - "udp_packets": { - "$id": "#/properties/packets/properties/udp_packets", - "type": "integer", - "title": "The udp_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 2971 - ] - }, - "unknown_dir_packets": { - "$id": "#/properties/packets/properties/unknown_dir_packets", - "type": "integer", - "title": "The unknown_dir_packets schema", - "description": "An explanation about the purpose of this instance.", - "default": 0, - "examples": [ - 0 - ] - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false -} diff --git a/src/handlers/netprobe/NetProbeStreamHandler.cpp b/src/handlers/netprobe/NetProbeStreamHandler.cpp index f41c21a50..6e338573d 100644 --- a/src/handlers/netprobe/NetProbeStreamHandler.cpp +++ b/src/handlers/netprobe/NetProbeStreamHandler.cpp @@ -34,17 +34,6 @@ void NetProbeStreamHandler::start() _metrics->set_recorded_stream(); } - if (config_exists("xact_ttl_ms")) { - auto ttl = config_get("xact_ttl_ms"); - 
_metrics->set_xact_ttl(static_cast(ttl)); - } else if (config_exists("xact_ttl_secs")) { - auto ttl = config_get("xact_ttl_secs"); - _metrics->set_xact_ttl(static_cast(ttl) * 1000); - } else if (_netprobe_proxy->config_exists("xact_ttl_ms")) { - auto ttl = _netprobe_proxy->config_get("xact_ttl_ms"); - _metrics->set_xact_ttl(static_cast(ttl)); - } - if (_netprobe_proxy) { _probe_send_connection = _netprobe_proxy->probe_send_signal.connect(&NetProbeStreamHandler::probe_signal_send, this); _probe_recv_connection = _netprobe_proxy->probe_recv_signal.connect(&NetProbeStreamHandler::probe_signal_recv, this); @@ -120,7 +109,6 @@ void NetProbeMetricsBucket::specialized_merge(const AbstractMetricsBucket &o, Me _targets_metrics[targetId]->attempts += target.second->attempts; _targets_metrics[targetId]->successes += target.second->successes; _targets_metrics[targetId]->dns_failures += target.second->dns_failures; - _targets_metrics[targetId]->timed_out += target.second->timed_out; } if (group_enabled(group::NetProbeMetrics::Histograms)) { _targets_metrics[targetId]->h_time_us.merge(target.second->h_time_us); @@ -144,7 +132,6 @@ void NetProbeMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelM target.second->attempts.to_prometheus(out, target_labels); target.second->successes.to_prometheus(out, target_labels); target.second->dns_failures.to_prometheus(out, target_labels); - target.second->timed_out.to_prometheus(out, target_labels); } bool h_max_min{true}; @@ -186,61 +173,6 @@ void NetProbeMetricsBucket::to_prometheus(std::stringstream &out, Metric::LabelM } } -void NetProbeMetricsBucket::to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels) const -{ - std::shared_lock r_lock(_mutex); - - for (const auto &target : _targets_metrics) { - auto target_labels = add_labels; - auto targetId = target.first; - target_labels["target"] = targetId; - - if (group_enabled(group::NetProbeMetrics::Counters)) { - 
target.second->attempts.to_opentelemetry(scope, start_ts, end_ts, target_labels); - target.second->successes.to_opentelemetry(scope, start_ts, end_ts, target_labels); - target.second->dns_failures.to_opentelemetry(scope, start_ts, end_ts, target_labels); - target.second->timed_out.to_opentelemetry(scope, start_ts, end_ts, target_labels); - } - - bool h_max_min{true}; - if (group_enabled(group::NetProbeMetrics::Histograms)) { - try { - target.second->minimum.clear(); - target.second->maximum.clear(); - - if (group_enabled(group::NetProbeMetrics::Counters)) { - target.second->minimum += target.second->h_time_us.get_min(); - target.second->minimum.to_opentelemetry(scope, start_ts, end_ts, target_labels); - target.second->maximum += target.second->h_time_us.get_max(); - target.second->maximum.to_opentelemetry(scope, start_ts, end_ts, target_labels); - } - - target.second->h_time_us.to_opentelemetry(scope, start_ts, end_ts, target_labels); - } catch (const std::exception &) { - h_max_min = false; - } - } else { - h_max_min = false; - } - - if (group_enabled(group::NetProbeMetrics::Quantiles)) { - try { - if (!h_max_min && group_enabled(group::NetProbeMetrics::Counters)) { - target.second->minimum.clear(); - target.second->maximum.clear(); - - target.second->minimum += target.second->q_time_us.get_min(); - target.second->minimum.to_opentelemetry(scope, start_ts, end_ts, target_labels); - target.second->maximum += target.second->q_time_us.get_max(); - target.second->maximum.to_opentelemetry(scope, start_ts, end_ts, target_labels); - } - target.second->q_time_us.to_opentelemetry(scope, start_ts, end_ts, target_labels); - } catch (const std::exception &) { - } - } - } -} - void NetProbeMetricsBucket::to_json(json &j) const { @@ -253,7 +185,6 @@ void NetProbeMetricsBucket::to_json(json &j) const target.second->attempts.to_json(j["targets"][targetId]); target.second->successes.to_json(j["targets"][targetId]); target.second->dns_failures.to_json(j["targets"][targetId]); - 
target.second->timed_out.to_json(j["targets"][targetId]); } bool h_max_min{true}; @@ -310,7 +241,6 @@ void NetProbeMetricsBucket::process_failure(ErrorType error, const std::string & ++_targets_metrics[target]->dns_failures; break; case ErrorType::Timeout: - ++_targets_metrics[target]->timed_out; case ErrorType::SocketError: case ErrorType::InvalidIp: case ErrorType::ConnectionFailure: @@ -374,16 +304,14 @@ void NetProbeMetricsManager::process_netprobe_icmp(pcpp::IcmpLayer *layer, const if (layer->getMessageType() == pcpp::ICMP_ECHO_REQUEST) { if (auto request = layer->getEchoRequestData(); request != nullptr) { - _request_reply_manager->start_transaction((static_cast(request->header->id) << 16) | request->header->sequence, {{stamp, {0, 0}}, target}); + _request_reply_manager.start_transaction((static_cast(request->header->id) << 16) | request->header->sequence, {{stamp, {0, 0}}, target}); } live_bucket()->process_attempts(_deep_sampling_now, target); } else if (layer->getMessageType() == pcpp::ICMP_ECHO_REPLY) { if (auto reply = layer->getEchoReplyData(); reply != nullptr) { - auto xact = _request_reply_manager->maybe_end_transaction((static_cast(reply->header->id) << 16) | reply->header->sequence, stamp); + auto xact = _request_reply_manager.maybe_end_transaction((static_cast(reply->header->id) << 16) | reply->header->sequence, stamp); if (xact.first == Result::Valid) { live_bucket()->new_transaction(_deep_sampling_now, xact.second); - } else if (xact.first == Result::TimedOut) { - live_bucket()->process_failure(ErrorType::Timeout, xact.second.target); } } } @@ -395,14 +323,12 @@ void NetProbeMetricsManager::process_netprobe_tcp(uint32_t port, bool send, cons new_event(stamp); if (send) { - _request_reply_manager->start_transaction(port, {{stamp, {0, 0}}, target}); + _request_reply_manager.start_transaction(port, {{stamp, {0, 0}}, target}); live_bucket()->process_attempts(_deep_sampling_now, target); } else { - auto xact = 
_request_reply_manager->maybe_end_transaction(port, stamp); + auto xact = _request_reply_manager.maybe_end_transaction(port, stamp); if (xact.first == Result::Valid) { live_bucket()->new_transaction(_deep_sampling_now, xact.second); - } else if (xact.first == Result::TimedOut) { - live_bucket()->process_failure(ErrorType::Timeout, xact.second.target); } } } diff --git a/src/handlers/netprobe/NetProbeStreamHandler.h b/src/handlers/netprobe/NetProbeStreamHandler.h index 0841fd48f..78d1aabc6 100644 --- a/src/handlers/netprobe/NetProbeStreamHandler.h +++ b/src/handlers/netprobe/NetProbeStreamHandler.h @@ -15,8 +15,8 @@ #pragma GCC diagnostic ignored "-Wold-style-cast" #pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant" #endif +#include "TcpLayerInternal.h" #include -#include #ifdef __GNUC__ #pragma GCC diagnostic pop #endif @@ -50,7 +50,6 @@ struct Target { Counter minimum; Counter maximum; Counter dns_failures; - Counter timed_out; Target() : q_time_us(NET_PROBE_SCHEMA, {"response_quantiles_us"}, "Net Probe quantile in microseconds") @@ -60,7 +59,6 @@ struct Target { , minimum(NET_PROBE_SCHEMA, {"response_min_us"}, "Minimum response time measured in the reporting interval") , maximum(NET_PROBE_SCHEMA, {"response_max_us"}, "Maximum response time measured in the reporting interval") , dns_failures(NET_PROBE_SCHEMA, {"dns_lookup_failures"}, "Total Net Probe failures when performed DNS lookup") - , timed_out(NET_PROBE_SCHEMA, {"packets_timeout"}, "Total Net Probe timeout transactions") { } }; @@ -81,7 +79,6 @@ class NetProbeMetricsBucket final : public visor::AbstractMetricsBucket void specialized_merge(const AbstractMetricsBucket &other, Metric::Aggregate agg_operator) override; void to_json(json &j) const override; void to_prometheus(std::stringstream &out, Metric::LabelMap add_labels = {}) const override; - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &start_ts, timespec &end_ts, Metric::LabelMap add_labels = {}) const override; void 
update_topn_metrics(size_t, uint64_t) override { } @@ -98,25 +95,18 @@ class NetProbeMetricsBucket final : public visor::AbstractMetricsBucket class NetProbeMetricsManager final : public visor::AbstractMetricsManager { - typedef TransactionManager> NetProbeTransactionManager; - std::unique_ptr _request_reply_manager; + TransactionManager> _request_reply_manager; public: NetProbeMetricsManager(const Configurable *window_config) : visor::AbstractMetricsManager(window_config) - , _request_reply_manager(std::make_unique()) { } - void on_period_shift([[maybe_unused]] timespec stamp, [[maybe_unused]] const NetProbeMetricsBucket *maybe_expiring_bucket) override + void on_period_shift(timespec stamp, [[maybe_unused]] const NetProbeMetricsBucket *maybe_expiring_bucket) override { - // Clear all old transactions - _request_reply_manager->clear(); - } - - void set_xact_ttl(uint32_t ttl) - { - _request_reply_manager = std::make_unique(ttl); + // NetProbe transaction support + _request_reply_manager.purge_old_transactions(stamp); } void process_filtered(timespec stamp); @@ -136,9 +126,7 @@ class NetProbeStreamHandler final : public visor::StreamMetricsHandler("flow_type"); if (flow_type == "sflow") { _flow_type = Type::SFLOW; - } else if (flow_type == "netflow" || flow_type == "ipfix") { + } else if (flow_type == "netflow") { _flow_type = Type::NETFLOW; } else { - throw FlowException(fmt::format("invalid flow_type \"{}\". Supported types: \"sflow\", \"netflow\" and \"ipfix\"", flow_type)); + throw FlowException(fmt::format("invalid flow_type \"{}\". 
Supported types: \"sflow\" and \"netflow\"", flow_type)); } } else { _logger->warn("flow_type not specified, using sflow"); @@ -119,7 +119,7 @@ void FlowInputStream::_read_from_pcap_file() static_cast(proxy.get())->netflow_cb(src_ip, sample, rawPacket.getRawDataLen()); } } else { - _logger->error("invalid netflow or ipfix packet"); + _logger->error("invalid netflow packet"); } } } diff --git a/src/inputs/flow/NetflowData.h b/src/inputs/flow/NetflowData.h index cea33a089..8e06e2a34 100644 --- a/src/inputs/flow/NetflowData.h +++ b/src/inputs/flow/NetflowData.h @@ -29,7 +29,7 @@ struct NFSample { struct Flows { bool is_ipv6 = false; - std::array src_ip{}, dst_ip{}, nexthop_ip{}; + uint32_t src_ip{0}, dst_ip{0}, nexthop_ip{0}; uint16_t if_index_in{0}, if_index_out{0}; uint32_t flow_packets{0}, flow_octets{0}; uint32_t flow_start{0}, flow_finish{0}; @@ -53,8 +53,8 @@ struct hash_pair { }; using NfMapID = std::pair; -static robin_hood::unordered_node_map nf9_template_map; -static robin_hood::unordered_node_map nf10_template_map; +static robin_hood::unordered_map nf9_template_map; +static robin_hood::unordered_map nf10_template_map; static bool process_netflow_v1(NFSample *sample) { @@ -83,9 +83,9 @@ static bool process_netflow_v1(NFSample *sample) flow_sample.protocol = nf1_flow->protocol; flow_sample.tos = nf1_flow->tos; - memcpy(flow_sample.src_ip.data(), &nf1_flow->src_ip, sizeof(uint32_t)); - memcpy(flow_sample.dst_ip.data(), &nf1_flow->dest_ip, sizeof(uint32_t)); - memcpy(flow_sample.nexthop_ip.data(), &nf1_flow->nexthop_ip, sizeof(uint32_t)); + flow_sample.src_ip = nf1_flow->src_ip; + flow_sample.dst_ip = nf1_flow->dest_ip; + flow_sample.nexthop_ip = nf1_flow->nexthop_ip; flow_sample.src_port = nf1_flow->src_port; flow_sample.dst_port = nf1_flow->dest_port; @@ -132,9 +132,9 @@ static bool process_netflow_v5(NFSample *sample) flow_sample.protocol = nf5_flow->protocol; flow_sample.tos = nf5_flow->tos; - memcpy(flow_sample.src_ip.data(), &nf5_flow->src_ip, 
sizeof(uint32_t)); - memcpy(flow_sample.dst_ip.data(), &nf5_flow->dest_ip, sizeof(uint32_t)); - memcpy(flow_sample.nexthop_ip.data(), &nf5_flow->nexthop_ip, sizeof(uint32_t)); + flow_sample.src_ip = nf5_flow->src_ip; + flow_sample.dst_ip = nf5_flow->dest_ip; + flow_sample.nexthop_ip = nf5_flow->nexthop_ip; flow_sample.src_port = nf5_flow->src_port; flow_sample.dst_port = nf5_flow->dest_port; @@ -186,9 +186,9 @@ static bool process_netflow_v7(NFSample *sample) flow_sample.protocol = nf7_flow->protocol; flow_sample.tos = nf7_flow->tos; - memcpy(flow_sample.src_ip.data(), &nf7_flow->src_ip, sizeof(uint32_t)); - memcpy(flow_sample.dst_ip.data(), &nf7_flow->dest_ip, sizeof(uint32_t)); - memcpy(flow_sample.nexthop_ip.data(), &nf7_flow->nexthop_ip, sizeof(uint32_t)); + flow_sample.src_ip = nf7_flow->src_ip; + flow_sample.dst_ip = nf7_flow->dest_ip; + flow_sample.nexthop_ip = nf7_flow->nexthop_ip; flow_sample.src_port = nf7_flow->src_port; flow_sample.dst_port = nf7_flow->dest_port; @@ -215,21 +215,9 @@ static bool process_netflow_v7(NFSample *sample) static inline void be_copy(uint8_t *data, uint8_t *target, uint32_t target_length, uint32_t rec_length) { if (target_length < rec_length) { - // truncate for known common types - if (target_length == sizeof(uint16_t) && rec_length == sizeof(uint32_t)) { - uint32_t value{0}; - memcpy(&value, data, rec_length); - uint16_t truncated = static_cast(value >> 16); - memcpy(target, &truncated, target_length); - } else if (target_length == sizeof(uint32_t) && rec_length == sizeof(uint64_t)) { - uint64_t value{0}; - memcpy(&value, data, rec_length); - uint32_t truncated = static_cast(value >> 32); - memcpy(target, &truncated, target_length); - } - } else { - memcpy(target + (target_length - rec_length), data, rec_length); + return; } + memcpy(target + (target_length - rec_length), data, rec_length); } static bool process_netflow_v9_template(uint8_t *pkt, size_t len, uint32_t source_id) @@ -237,8 +225,8 @@ static bool 
process_netflow_v9_template(uint8_t *pkt, size_t len, uint32_t sourc struct NF9_FLOWSET_HEADER_COMMON *template_header; struct NF9_TEMPLATE_FLOWSET_HEADER *tmplh; struct NF9_TEMPLATE_FLOWSET_RECORD *tmplr; - uint16_t template_id; - uint32_t i, count, offset, total_size; + uint32_t i, count, offset, template_id, total_size; + peer_nf9_template nf9_template; template_header = reinterpret_cast(pkt); if (len < sizeof(*template_header)) { @@ -260,19 +248,26 @@ static bool process_netflow_v9_template(uint8_t *pkt, size_t len, uint32_t sourc std::vector template_recs; for (i = 0; i < count; i++) { if (offset >= len) { - return false; + break; } tmplr = reinterpret_cast(pkt + offset); uint32_t rec_length = be16toh(tmplr->length); + uint32_t rec_type = be16toh(tmplr->type); - peer_nf9_record recs(be16toh(tmplr->type), rec_length); + peer_nf9_record recs(rec_type, rec_length); offset += sizeof(*tmplr); template_recs.push_back(recs); total_size += rec_length; } + nf9_template.template_id = template_id; + nf9_template.source_id = source_id; + nf9_template.num_records = count; + nf9_template.records = template_recs; + nf9_template.num_records = i; + nf9_template.total_len = total_size; - nf9_template_map[NfMapID(source_id, template_id)] = {template_id, source_id, count, total_size, template_recs}; + nf9_template_map[NfMapID(source_id, template_id)] = nf9_template; } return true; @@ -315,15 +310,17 @@ static void nf9_rec_to_flow(NFSample::Flows *flow, struct peer_nf9_record *rec, V9_FIELD_32(NF9_FIRST_SWITCHED, &flow->flow_start, sizeof(flow->flow_start)); V9_FIELD(NF9_IPV6_SRC_MASK, &flow->src_mask, sizeof(flow->src_mask)); V9_FIELD(NF9_IPV6_DST_MASK, &flow->dst_mask, sizeof(flow->dst_mask)); - V9_FIELD(NF9_IPV4_SRC_ADDR, &flow->src_ip, sizeof(uint32_t)); + V9_FIELD(NF9_IPV4_SRC_ADDR, &flow->src_ip, sizeof(flow->src_ip)); V9_FIELD(NF9_IPV6_SRC_ADDR, &flow->src_ip, sizeof(flow->src_ip)); - V9_FIELD(NF9_IPV4_DST_ADDR, &flow->dst_ip, sizeof(uint32_t)); + 
V9_FIELD(NF9_IPV4_DST_ADDR, &flow->dst_ip, sizeof(flow->dst_ip)); V9_FIELD(NF9_IPV6_DST_ADDR, &flow->dst_ip, sizeof(flow->dst_ip)); - V9_FIELD(NF9_IPV4_NEXT_HOP, &flow->nexthop_ip, sizeof(uint32_t)); + V9_FIELD(NF9_IPV4_NEXT_HOP, &flow->nexthop_ip, sizeof(flow->nexthop_ip)); V9_FIELD(NF9_IPV6_NEXT_HOP, &flow->nexthop_ip, sizeof(flow->nexthop_ip)); case NF9_IP_PROTOCOL_VERSION: - if (*data == 6) { + uint8_t version = 0; + be_copy(data, reinterpret_cast(&version), sizeof(version), rec->len); + if (version == 6) { flow->is_ipv6 = true; } break; @@ -490,15 +487,17 @@ static void nf10_rec_to_flow(NFSample::Flows *flow, struct peer_nf10_record *rec V10_FIELD_32(NF10_FIRST_SWITCHED, &flow->flow_start, sizeof(flow->flow_start)); V10_FIELD(NF10_IPV6_SRC_MASK, &flow->src_mask, sizeof(flow->src_mask)); V10_FIELD(NF10_IPV6_DST_MASK, &flow->dst_mask, sizeof(flow->dst_mask)); - V10_FIELD(NF10_IPV4_SRC_ADDR, &flow->src_ip, sizeof(uint32_t)); + V10_FIELD(NF10_IPV4_SRC_ADDR, &flow->src_ip, sizeof(flow->src_ip)); V10_FIELD(NF10_IPV6_SRC_ADDR, &flow->src_ip, sizeof(flow->src_ip)); - V10_FIELD(NF10_IPV4_DST_ADDR, &flow->dst_ip, sizeof(uint32_t)); + V10_FIELD(NF10_IPV4_DST_ADDR, &flow->dst_ip, sizeof(flow->dst_ip)); V10_FIELD(NF10_IPV6_DST_ADDR, &flow->dst_ip, sizeof(flow->dst_ip)); - V10_FIELD(NF10_IPV4_NEXT_HOP, &flow->nexthop_ip, sizeof(uint32_t)); + V10_FIELD(NF10_IPV4_NEXT_HOP, &flow->nexthop_ip, sizeof(flow->nexthop_ip)); V10_FIELD(NF10_IPV6_NEXT_HOP, &flow->nexthop_ip, sizeof(flow->nexthop_ip)); case NF10_IP_PROTOCOL_VERSION: - if (*data == 6) { + uint8_t version = 0; + be_copy(data, reinterpret_cast(&version), sizeof(version), rec->len); + if (version == 6) { flow->is_ipv6 = true; } break; @@ -525,6 +524,7 @@ static bool process_netflow_v10_data(std::vector *flows, uint8_ } flowset_id = be16toh(dath->c.flowset_id); + auto iter = nf10_template_map.find(NfMapID(source_id, flowset_id)); if (iter == nf10_template_map.end()) { return false; @@ -548,8 +548,7 @@ static bool 
process_netflow_v10_data(std::vector *flows, uint8_ NFSample::Flows flow = {}; for (j = 0; j < nf10_template.num_records; j++) { nf10_rec_to_flow(&flow, &nf10_template.records[j], pkt + offset + offset_recs); - // i.e. if variable size, first byte contains the size - (nf10_template.records[j].len == 0xFFFF) ? offset_recs += (*(pkt + offset + offset_recs) + 1) : offset_recs += nf10_template.records[j].len; + offset_recs += nf10_template.records[j].len; } flows->push_back(flow); offset += nf10_template.total_len; @@ -563,8 +562,8 @@ static bool process_netflow_v10_template(uint8_t *pkt, size_t len, uint32_t sour struct NF10_FLOWSET_HEADER_COMMON *template_header; struct NF10_TEMPLATE_FLOWSET_HEADER *tmplh; struct NF10_TEMPLATE_FLOWSET_RECORD *tmplr; - uint16_t template_id; - uint32_t i, count, offset, total_size; + uint32_t i, count, offset, template_id, total_size; + peer_nf10_template nf10_template; template_header = reinterpret_cast(pkt); if (len < sizeof(*template_header)) { @@ -586,21 +585,26 @@ static bool process_netflow_v10_template(uint8_t *pkt, size_t len, uint32_t sour std::vector template_recs; for (i = 0; i < count; i++) { if (offset >= len) { - return false; + break; } tmplr = reinterpret_cast(pkt + offset); - uint32_t rec_type = be16toh(tmplr->type); uint32_t rec_length = be16toh(tmplr->length); + uint32_t rec_type = be16toh(tmplr->type); peer_nf10_record recs(rec_type, rec_length); offset += sizeof(*tmplr); - (rec_type & NF10_ENTERPRISE) ? offset += sizeof(uint32_t) : offset; template_recs.push_back(recs); - (rec_length == 0xFFFF) ? total_size++ : total_size += rec_length; // i.e. 
variable length + total_size += rec_length; } - - nf10_template_map[NfMapID(source_id, template_id)] = {template_id, source_id, count, total_size, template_recs}; + nf10_template.template_id = template_id; + nf10_template.source_id = source_id; + nf10_template.num_records = count; + nf10_template.records = template_recs; + nf10_template.num_records = i; + nf10_template.total_len = total_size; + + nf10_template_map[NfMapID(source_id, template_id)] = nf10_template; } return true; diff --git a/src/inputs/netprobe/CMakeLists.txt b/src/inputs/netprobe/CMakeLists.txt index 63a39a1ac..5b9fe73e1 100644 --- a/src/inputs/netprobe/CMakeLists.txt +++ b/src/inputs/netprobe/CMakeLists.txt @@ -20,7 +20,6 @@ target_include_directories(VisorInputNetProbe target_link_libraries(VisorInputNetProbe PUBLIC Visor::Core - Visor::Lib::Tcp ${CONAN_LIBS_LIBUV} ${CONAN_LIBS_UVW} ${CONAN_LIBS_PCAPPLUSPLUS} diff --git a/src/inputs/netprobe/NetProbeInputStream.cpp b/src/inputs/netprobe/NetProbeInputStream.cpp index f7f77cf41..02e989ed2 100644 --- a/src/inputs/netprobe/NetProbeInputStream.cpp +++ b/src/inputs/netprobe/NetProbeInputStream.cpp @@ -270,8 +270,6 @@ void NetProbeInputStream::info_json(json &j) const std::unique_ptr NetProbeInputStream::create_event_proxy(const Configurable &filter) { - auto custom_filter = filter; - custom_filter.config_set("xact_ttl_ms", _timeout_msec); - return std::make_unique(_name, custom_filter); + return std::make_unique(_name, filter); } } diff --git a/src/inputs/netprobe/PingProbe.cpp b/src/inputs/netprobe/PingProbe.cpp index 8404515f1..98d0896f7 100644 --- a/src/inputs/netprobe/PingProbe.cpp +++ b/src/inputs/netprobe/PingProbe.cpp @@ -1,15 +1,13 @@ #include "PingProbe.h" #include "NetProbeException.h" -#include "ThreadName.h" #include #include #include namespace visor::input::netprobe { -std::vector> PingReceiver::recv_packets{}; -std::unique_ptr PingProbe::_receiver{nullptr}; +sigslot::signal PingReceiver::recv_signal; thread_local std::atomic 
PingProbe::sock_count{0}; thread_local SOCKET PingProbe::_sock{INVALID_SOCKET}; @@ -19,6 +17,7 @@ PingReceiver::PingReceiver() } PingReceiver::~PingReceiver() { + recv_signal.disconnect_all(); _poll->close(); if (_async_h && _io_thread) { // we have to use AsyncHandle to stop the loop from the same thread the loop is running in @@ -93,29 +92,17 @@ void PingReceiver::_setup_receiver() timeval time; TIMESPEC_TO_TIMEVAL(&time, &stamp); pcpp::RawPacket raw(reinterpret_cast(_array.data()), rc, time, false, pcpp::LINKTYPE_DLT_RAW1); - _recv_packets.emplace_back(pcpp::Packet(&raw, pcpp::ICMP), stamp); + pcpp::Packet packet(&raw, pcpp::ICMP); + recv_signal(packet, stamp); } } }); - _timer = _io_loop->resource(); - _timer->on([this](const auto &, auto &) { - if (!_recv_packets.empty()) { - recv_packets = _recv_packets; - _recv_packets.clear(); - for (const auto &callback : _callbacks) { - callback->send(); - } - } - }); - _timer->start(uvw::TimerHandle::Time{100}, uvw::TimerHandle::Time{100}); - _poll->init(); _poll->start(uvw::PollHandle::Event::READABLE); // spawn the loop _io_thread = std::make_unique([this] { - thread::change_self_name("receiver", "ping"); _io_loop->run(); }); } @@ -140,10 +127,8 @@ bool PingProbe::start(std::shared_ptr io_loop) _io_loop = io_loop; - if (!_receiver) { - // only once - _receiver = std::make_unique(); - } + // execute once + static auto receiver = std::make_unique(); _interval_timer = _io_loop->resource(); if (!_interval_timer) { @@ -152,6 +137,20 @@ bool PingProbe::start(std::shared_ptr io_loop) _interval_timer->on([this](const auto &, auto &) { _internal_sequence = 0; + _timeout_timer = _io_loop->resource(); + if (!_timeout_timer) { + throw NetProbeException("PingProbe - unable to initialize timeout TimerHandle"); + } + + _timeout_timer->on([this](const auto &, auto &) { + _internal_sequence = _config.packets_per_test; + _fail(ErrorType::Timeout, TestType::Ping, _name); + if (_internal_timer) { + _internal_timer->stop(); + } + 
_interval_timer->again(); + }); + if (auto error = _create_socket(); error.has_value()) { _fail(error.value(), TestType::Ping, _name); return; @@ -163,34 +162,24 @@ bool PingProbe::start(std::shared_ptr io_loop) } _internal_timer = _io_loop->resource(); - _internal_timer->on([this](const auto &, auto &handle) { + _internal_timer->on([this](const auto &, auto &) { if (_internal_sequence < static_cast(_config.packets_per_test)) { _internal_sequence++; + _timeout_timer->stop(); + _timeout_timer->start(uvw::TimerHandle::Time{_config.timeout_msec}, uvw::TimerHandle::Time{0}); _send_icmp_v4(_internal_sequence); - } else { - handle.stop(); - handle.close(); } }); + _recv_connection = PingReceiver::recv_signal.connect([this](pcpp::Packet &packet, timespec stamp) { _recv(packet, TestType::Ping, _name, stamp); }); + (_sequence == UCHAR_MAX) ? _sequence = 0 : _sequence++; _send_icmp_v4(_internal_sequence); _internal_sequence++; + _timeout_timer->start(uvw::TimerHandle::Time{_config.timeout_msec}, uvw::TimerHandle::Time{0}); _internal_timer->start(uvw::TimerHandle::Time{_config.packets_interval_msec}, uvw::TimerHandle::Time{_config.packets_interval_msec}); }); - _recv_handler = _io_loop->resource(); - if (!_recv_handler) { - throw NetProbeException("PingProbe - unable to initialize AsyncHandle receiver"); - } - _recv_handler->on([this](const auto &, auto &) { - for (auto &[packet, stamp] : PingReceiver::recv_packets) { - _recv(packet, TestType::Ping, _name, stamp); - } - }); - _receiver->register_async_callback(_recv_handler); - _recv_handler->init(); - ++sock_count; _interval_timer->start(uvw::TimerHandle::Time{0}, uvw::TimerHandle::Time{_config.interval_msec}); _init = true; @@ -201,12 +190,8 @@ bool PingProbe::stop() { if (_interval_timer) { _interval_timer->stop(); - _interval_timer->close(); - } - if (_recv_handler) { - _receiver->remove_async_callback(_recv_handler); - _recv_handler->close(); } + _recv_connection.disconnect(); _close_socket(); return true; } diff --git 
a/src/inputs/netprobe/PingProbe.h b/src/inputs/netprobe/PingProbe.h index 01288ad46..2c1e413e8 100644 --- a/src/inputs/netprobe/PingProbe.h +++ b/src/inputs/netprobe/PingProbe.h @@ -35,10 +35,8 @@ typedef int SOCKET; #include #include #include -#include #include #include -#include #include #include @@ -58,26 +56,14 @@ class PingReceiver std::unique_ptr _io_thread; std::shared_ptr _io_loop; std::shared_ptr _async_h; - std::vector> _callbacks; - std::shared_ptr _timer; - std::vector> _recv_packets; + void _setup_receiver(); public: - static std::vector> recv_packets; + static sigslot::signal recv_signal; PingReceiver(); ~PingReceiver(); - - void register_async_callback(std::shared_ptr callback) - { - _callbacks.push_back(callback); - } - - void remove_async_callback(std::shared_ptr callback) - { - _callbacks.erase(std::remove(_callbacks.begin(), _callbacks.end(), callback), _callbacks.end()); - } }; /** @@ -89,7 +75,6 @@ class PingReceiver */ class PingProbe final : public NetProbe { - static std::unique_ptr _receiver; static thread_local SOCKET _sock; bool _init{false}; @@ -99,11 +84,12 @@ class PingProbe final : public NetProbe uint8_t _internal_sequence{0}; std::shared_ptr _interval_timer; std::shared_ptr _internal_timer; - std::shared_ptr _recv_handler; + std::shared_ptr _timeout_timer; SOCKETLEN _sin_length{0}; std::vector _payload_array; sockaddr_in _sa; sockaddr_in6 _sa6; + sigslot::connection _recv_connection; void _send_icmp_v4(uint8_t sequence); std::optional _get_addr(); diff --git a/src/inputs/netprobe/TcpLayerInternal.h b/src/inputs/netprobe/TcpLayerInternal.h new file mode 100644 index 000000000..1da9d1801 --- /dev/null +++ b/src/inputs/netprobe/TcpLayerInternal.h @@ -0,0 +1,13 @@ +#pragma once + +// TCPOPT_CC, TCPOPT_CCNEW and TCPOPT_CCECHO are defined in the MacOS's tcp.h. 
+#ifdef TCPOPT_CC +#undef TCPOPT_CC +#endif // TCPOPT_CC +#ifdef TCPOPT_CCNEW +#undef TCPOPT_CCNEW +#endif // TCPOPT_CCNEW +#ifdef TCPOPT_CCECHO +#undef TCPOPT_CCECHO +#endif // TCPOPT_CCECHO +#include \ No newline at end of file diff --git a/src/inputs/netprobe/TcpProbe.cpp b/src/inputs/netprobe/TcpProbe.cpp index 625cdd0bb..6237e09ac 100644 --- a/src/inputs/netprobe/TcpProbe.cpp +++ b/src/inputs/netprobe/TcpProbe.cpp @@ -7,7 +7,7 @@ #pragma GCC diagnostic ignored "-Wold-style-cast" #pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant" #endif -#include +#include "TcpLayerInternal.h" #ifdef __GNUC__ #pragma GCC diagnostic pop #endif diff --git a/src/inputs/pcap/PcapInputStream.cpp b/src/inputs/pcap/PcapInputStream.cpp index fd04cc7c3..8a72162a4 100644 --- a/src/inputs/pcap/PcapInputStream.cpp +++ b/src/inputs/pcap/PcapInputStream.cpp @@ -70,7 +70,6 @@ static void _pcap_stats_update(pcpp::IPcapDevice::PcapStats &stats, void *cookie PcapInputStream::PcapInputStream(const std::string &name) : visor::InputStream(name) - , _lru_list(std::make_unique>(DEFAULT_LRULIST_SIZE)) , _pcapDevice(nullptr) , _tcp_reassembly(_tcp_message_ready_cb, this, @@ -94,10 +93,6 @@ void PcapInputStream::start() validate_configs(_config_defs); - if (config_exists("tcp_packet_reassembly_cache_limit")) { - auto limit = config_get("tcp_packet_reassembly_cache_limit"); - _lru_list = std::make_unique>(limit); - } if (config_exists("pcap_file")) { // read from pcap file. 
this is a special case from a command line utility if (!config_exists("bpf")) { @@ -257,9 +252,7 @@ void PcapInputStream::tcp_message_ready(int8_t side, const pcpp::TcpStreamData & for (auto &proxy : _event_proxies) { dynamic_cast(proxy.get())->tcp_message_ready_cb(side, tcpData, _packet_dir_cache); } - if (_lru_list->put(tcpData.getConnectionData().flowKey, tcpData.getConnectionData().endTime, &_deleted_data)){ - _lru_overflow.push_back(_deleted_data.first); - } + _lru_list.put(tcpData.getConnectionData().flowKey, tcpData.getConnectionData().endTime); } void PcapInputStream::tcp_connection_start(const pcpp::ConnectionData &connectionData) @@ -268,9 +261,7 @@ void PcapInputStream::tcp_connection_start(const pcpp::ConnectionData &connectio for (auto &proxy : _event_proxies) { dynamic_cast(proxy.get())->tcp_connection_start_cb(connectionData, _packet_dir_cache); } - if (_lru_list->put(connectionData.flowKey, connectionData.startTime, &_deleted_data)) { - _lru_overflow.push_back(_deleted_data.first); - } + _lru_list.put(connectionData.flowKey, connectionData.startTime); } void PcapInputStream::tcp_connection_end(const pcpp::ConnectionData &connectionData, pcpp::TcpReassembly::ConnectionEndReason reason) @@ -279,7 +270,7 @@ void PcapInputStream::tcp_connection_end(const pcpp::ConnectionData &connectionD for (auto &proxy : _event_proxies) { static_cast(proxy.get())->tcp_connection_end_cb(connectionData, reason); } - _lru_list->eraseElement(connectionData.flowKey); + _lru_list.eraseElement(connectionData.flowKey); } void PcapInputStream::process_pcap_stats(const pcpp::IPcapDevice::PcapStats &stats) @@ -402,15 +393,15 @@ void PcapInputStream::process_raw_packet(pcpp::RawPacket *rawPacket) auto IP4layer = packet.getLayerOfType(); auto IP6layer = packet.getLayerOfType(); if (IP4layer) { - if (lib::utils::match_subnet(_hostIPv4, IP4layer->getDstIPv4Address().toInt()).has_value()) { + if (lib::utils::match_subnet(_hostIPv4, IP4layer->getDstIPv4Address().toInt()).first) { 
_packet_dir_cache = PacketDirection::toHost; - } else if (lib::utils::match_subnet(_hostIPv4, IP4layer->getSrcIPv4Address().toInt()).has_value()) { + } else if (lib::utils::match_subnet(_hostIPv4, IP4layer->getSrcIPv4Address().toInt()).first) { _packet_dir_cache = PacketDirection::fromHost; } } else if (IP6layer) { - if (lib::utils::match_subnet(_hostIPv6, IP6layer->getDstIPv6Address().toBytes()).has_value()) { + if (lib::utils::match_subnet(_hostIPv6, IP6layer->getDstIPv6Address().toBytes()).first) { _packet_dir_cache = PacketDirection::toHost; - } else if (lib::utils::match_subnet(_hostIPv6, IP6layer->getSrcIPv6Address().toBytes()).has_value()) { + } else if (lib::utils::match_subnet(_hostIPv6, IP6layer->getSrcIPv6Address().toBytes()).first) { _packet_dir_cache = PacketDirection::fromHost; } } @@ -447,21 +438,15 @@ void PcapInputStream::process_raw_packet(pcpp::RawPacket *rawPacket) } for (uint8_t counter = 0; counter < MAX_TCP_CLEANUPS; counter++) { - if (_lru_list->getSize() == 0) { + if (_lru_list.getSize() == 0) { break; } - auto connection = _lru_list->getLRUElement(); + auto connection = _lru_list.getLRUElement(); if (timestamp.tv_sec < connection.second.tv_sec + TCP_TIMEOUT) { break; } _tcp_reassembly.closeConnection(connection.first); - _lru_list->eraseElement(connection.first); - } - if (_lru_overflow.size() > 0) { - for (const auto &fKey : _lru_overflow) { - _tcp_reassembly.closeConnection(fKey); - } - _lru_overflow.clear(); + _lru_list.eraseElement(connection.first); } } else { // unsupported layer3 protocol @@ -593,8 +578,8 @@ void PcapInputStream::_get_hosts_from_libpcap_iface() if (!nmcvt) { throw PcapException("couldn't parse IPv4 netmask address on device"); } - uint8_t cidr = lib::utils::get_cidr(nmcvt->s_addr); - _hostIPv4.push_back({*adrcvt, cidr, ip + "/" + std::to_string(cidr)}); + uint8_t len = static_cast(0xFFFFFFFFUL & nmcvt->s_addr); + _hostIPv4.push_back({*adrcvt, len, ip + "/" + std::to_string(len)}); } else if (i.addr->sa_family == 
AF_INET6) { auto adrcvt = pcpp::internal::sockaddr2in6_addr(i.addr); if (!adrcvt) { @@ -604,8 +589,14 @@ void PcapInputStream::_get_hosts_from_libpcap_iface() if (!nmcvt) { throw PcapException("couldn't parse IPv6 netmask address on device"); } - uint8_t cidr = lib::utils::get_cidr(nmcvt->s6_addr, 16); - _hostIPv6.push_back({*adrcvt, cidr, ip + "/" + std::to_string(cidr)}); + uint8_t len = 0; + for (int i = 0; i < 16; i++) { + while (nmcvt->s6_addr[i]) { + len++; + nmcvt->s6_addr[i] >>= 1; + } + } + _hostIPv6.push_back({*adrcvt, len, ip + "/" + std::to_string(len)}); } } } diff --git a/src/inputs/pcap/PcapInputStream.h b/src/inputs/pcap/PcapInputStream.h index 647c2fdd0..d9744cb63 100644 --- a/src/inputs/pcap/PcapInputStream.h +++ b/src/inputs/pcap/PcapInputStream.h @@ -96,12 +96,9 @@ class PcapInputStream : public visor::InputStream private: static constexpr uint8_t TCP_TIMEOUT = 30; static constexpr uint8_t MAX_TCP_CLEANUPS = 100; - static constexpr size_t DEFAULT_LRULIST_SIZE = TCP_TIMEOUT * 10000; static const PcapSource DefaultPcapSource = PcapSource::libpcap; - std::unique_ptr> _lru_list; - std::pair _deleted_data; - std::vector _lru_overflow; + LRUList _lru_list; lib::utils::IPv4subnetList _hostIPv4; lib::utils::IPv6subnetList _hostIPv6; PacketDirection _packet_dir_cache{PacketDirection::unknown}; @@ -130,8 +127,7 @@ class PcapInputStream : public visor::InputStream "debug", "host_spec", "pcap_file", - "pcap_source", - "tcp_packet_reassembly_cache_limit"}; + "pcap_source"}; protected: void _open_pcap(const std::string &fileName, const std::string &bpfFilter); @@ -185,7 +181,7 @@ class PcapInputEventProxy : public visor::InputEventProxy // key example: dnsonly_rcode3 std::unordered_map _udp_predicate_signals; // key: - std::map> _udp_predicate_connections; + std::map _udp_predicate_connections; mutable std::shared_mutex _pcap_proxy_mutex; std::shared_ptr _logger; @@ -222,16 +218,14 @@ class PcapInputEventProxy : public visor::InputEventProxy } // now install 
the given conditional signal based on the jump key // record the connection so we can remove it later when the handler disconnects - _udp_predicate_connections[handler_id].push_back(_udp_predicate_signals[predicate_jump_key].connect(callback)); + _udp_predicate_connections[handler_id] = _udp_predicate_signals[predicate_jump_key].connect(callback); } void unregister_udp_predicate_signal(const std::string &handler_id) { assert(_udp_predicate_connections.find(handler_id) != _udp_predicate_connections.end()); std::shared_lock lock(_pcap_proxy_mutex); - for (auto &connection : _udp_predicate_connections[handler_id]) { - connection.disconnect(); - } + _udp_predicate_connections[handler_id].disconnect(); } void process_udp_packet_cb(pcpp::Packet &payload, PacketDirection dir, pcpp::ProtocolType l3, uint32_t flowkey, timespec stamp) @@ -286,7 +280,6 @@ class PcapInputEventProxy : public visor::InputEventProxy // note: these are mutable because consumer_count() calls slot_count() which is not const (unclear if it could/should be) mutable sigslot::signal packet_signal; mutable sigslot::signal udp_signal; - mutable sigslot::signal tcp_reassembled_signal; mutable sigslot::signal start_tstamp_signal; mutable sigslot::signal end_tstamp_signal; mutable sigslot::signal tcp_message_ready_signal; diff --git a/src/inputs/pcap/afpacket.cpp b/src/inputs/pcap/afpacket.cpp index 7d6952b08..06c8b30fa 100644 --- a/src/inputs/pcap/afpacket.cpp +++ b/src/inputs/pcap/afpacket.cpp @@ -72,10 +72,12 @@ void AFPacket::flush_block(struct block_desc *pbd) void AFPacket::walk_block(struct block_desc *pbd) { int num_pkts = pbd->h1.num_pkts, i; + uint64_t bytes = 0; struct tpacket3_hdr *ppd; ppd = reinterpret_cast(reinterpret_cast(pbd) + pbd->h1.offset_to_first_pkt); for (i = 0; i < num_pkts; ++i) { + bytes += ppd->tp_snaplen; auto data_pointer = reinterpret_cast(ppd) + ppd->tp_mac; pcpp::RawPacket packet(data_pointer, ppd->tp_snaplen, timespec{pbd->h1.ts_last_pkt.ts_sec, pbd->h1.ts_last_pkt.ts_nsec}, 
diff --git a/src/tests/fixtures/ipfix.pcap b/src/tests/fixtures/ipfix.pcap deleted file mode 100644 index 9b87ca517..000000000 Binary files a/src/tests/fixtures/ipfix.pcap and /dev/null differ diff --git a/src/tests/test_metrics.cpp b/src/tests/test_metrics.cpp index 710b8cdcb..0782934ea 100644 --- a/src/tests/test_metrics.cpp +++ b/src/tests/test_metrics.cpp @@ -3,7 +3,7 @@ using namespace visor; -class TestMetricsBucket final : public AbstractMetricsBucket +class TestMetricsBucket : public AbstractMetricsBucket { public: void specialized_merge([[maybe_unused]] const AbstractMetricsBucket &other, [[maybe_unused]] Metric::Aggregate agg_operator) @@ -17,11 +17,6 @@ class TestMetricsBucket final : public AbstractMetricsBucket { out << "test_performed" << std::endl; } - void to_opentelemetry(metrics::v1::ScopeMetrics &scope, timespec &, timespec &, Metric::LabelMap) const - { - scope.add_metrics()->set_name("test1"); - scope.add_metrics()->set_name("test2"); - } void update_topn_metrics(size_t, uint64_t) { } @@ -39,7 +34,6 @@ TEST_CASE("Abstract metrics manager", "[metrics][abstract]") { json j; std::stringstream output; - metrics::v1::ScopeMetrics scope; std::string line; visor::Config c; c.config_set("num_periods", 1); @@ -70,12 +64,6 @@ TEST_CASE("Abstract metrics manager", "[metrics][abstract]") CHECK(line == "test_performed"); } - SECTION("Abstract window single opentelemetry") - { - manager->window_single_opentelemetry(scope); - CHECK(scope.metrics_size() == 2); - } - SECTION("Abstract window single prometheus failed") { CHECK_THROWS_WITH(manager->window_single_prometheus(output, 2, {{"policy", "default"}}), @@ -133,7 +121,6 @@ TEST_CASE("Counter metrics", "[metrics][counter]") json j; std::stringstream output; - metrics::v1::ScopeMetrics scope; std::string line; Counter c("root", {"test", "metric"}, "A counter test metric"); @@ -166,15 +153,6 @@ TEST_CASE("Counter metrics", "[metrics][counter]") std::getline(output, line); CHECK(line == 
R"(root_test_metric{instance="test instance",policy="default"} 1)"); } - - SECTION("Counter opentelemetry") - { - ++c; - timespec stamp; - c.to_opentelemetry(scope, stamp, stamp, {{"policy", "default"}}); - CHECK(scope.metrics(0).name() == "root_test_metric"); - CHECK(scope.metrics(0).has_gauge()); - } } TEST_CASE("Quantile metrics", "[metrics][quantile]") @@ -183,7 +161,6 @@ TEST_CASE("Quantile metrics", "[metrics][quantile]") json j; std::stringstream output; - metrics::v1::ScopeMetrics scope; std::string line; Quantile q("root", {"test", "metric"}, "A quantile test metric"); @@ -235,15 +212,6 @@ TEST_CASE("Quantile metrics", "[metrics][quantile]") std::getline(output, line); CHECK(line == R"(root_test_metric_count{instance="test instance",policy="default"} 1)"); } - - SECTION("Quantile opentelemetry") - { - q.update(12); - timespec stamp; - q.to_opentelemetry(scope, stamp, stamp, {{"policy", "default"}}); - CHECK(scope.metrics(0).name() == "root_test_metric"); - CHECK(scope.metrics(0).has_summary()); - } } TEST_CASE("Histogram int metrics", "[metrics][histogram]") @@ -252,15 +220,14 @@ TEST_CASE("Histogram int metrics", "[metrics][histogram]") json j; std::stringstream output; - metrics::v1::ScopeMetrics scope; std::string line; - Histogram h("root", {"test", "metric"}, "A histogram test metric"); + Histogram h("root", {"test", "metric"}, "A histogram test metric"); SECTION("Histogram to json") { h.name_json_assign(j, 58); CHECK(j["test"]["metric"] == 58); - uint64_t value = 12; + int_fast32_t value = 12; h.update(value); h.update(value); h.update(value); @@ -270,6 +237,7 @@ TEST_CASE("Histogram int metrics", "[metrics][histogram]") CHECK(j["top"]["test"]["metric"]["buckets"]["+Inf"] == 4.0); CHECK(j["top"]["test"]["metric"]["buckets"]["12"] == 4.0); + CHECK(j["top"]["test"]["metric"]["buckets"]["4"] == 0.0); CHECK(j["top"]["test"]["metric"]["buckets"]["8"] == 1.0); } @@ -293,7 +261,7 @@ TEST_CASE("Histogram int metrics", "[metrics][histogram]") 
std::getline(output, line); CHECK(line == "# TYPE root_test_metric histogram"); std::getline(output, line); - CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="1",policy="default"} 1)"); + CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="4",policy="default"} 1)"); std::getline(output, line); CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="8",policy="default"} 2)"); std::getline(output, line); @@ -303,19 +271,6 @@ TEST_CASE("Histogram int metrics", "[metrics][histogram]") std::getline(output, line); CHECK(line == R"(root_test_metric_count{instance="test instance",policy="default"} 5)"); } - - SECTION("Histogram opentelemetry") - { - h.update(12); - h.update(12); - h.update(1); - h.update(8); - h.update(12); - timespec stamp; - h.to_opentelemetry(scope, stamp, stamp, {{"policy", "default"}}); - CHECK(scope.metrics(0).name() == "root_test_metric"); - CHECK(scope.metrics(0).has_histogram()); - } } TEST_CASE("Histogram double metrics", "[metrics][histogram]") @@ -324,7 +279,6 @@ TEST_CASE("Histogram double metrics", "[metrics][histogram]") json j; std::stringstream output; - metrics::v1::ScopeMetrics scope; std::string line; Histogram h("root", {"test", "metric"}, "A histogram test metric"); @@ -336,8 +290,10 @@ TEST_CASE("Histogram double metrics", "[metrics][histogram]") h.update(8.000); h.to_json(j["top"]); - CHECK(j["top"]["test"]["metric"]["buckets"]["12.915497"] == 4.0); - CHECK(j["top"]["test"]["metric"]["buckets"]["8.799225"] == 1.0); + CHECK(j["top"]["test"]["metric"]["buckets"]["+Inf"] == 4.0); + CHECK(j["top"]["test"]["metric"]["buckets"]["12.000000"] == 4.0); + CHECK(j["top"]["test"]["metric"]["buckets"]["4.000000"] == 0.0); + CHECK(j["top"]["test"]["metric"]["buckets"]["8.000000"] == 1.0); } SECTION("Histogram get n") @@ -360,29 +316,16 @@ TEST_CASE("Histogram double metrics", "[metrics][histogram]") std::getline(output, line); CHECK(line == "# TYPE root_test_metric histogram"); 
std::getline(output, line); - CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="1.000000",policy="default"} 1)"); + CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="4.000033",policy="default"} 1)"); std::getline(output, line); - CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="8.799225",policy="default"} 2)"); + CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="8.000067",policy="default"} 2)"); std::getline(output, line); - CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="12.915497",policy="default"} 5)"); + CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="12.000100",policy="default"} 5)"); std::getline(output, line); CHECK(line == R"(root_test_metric_bucket{instance="test instance",le="+Inf",policy="default"} 5)"); std::getline(output, line); CHECK(line == R"(root_test_metric_count{instance="test instance",policy="default"} 5)"); } - - SECTION("Histogram opentelemetry") - { - h.update(12.00); - h.update(12.0001); - h.update(1); - h.update(8); - h.update(12); - timespec stamp; - h.to_opentelemetry(scope, stamp, stamp, {{"policy", "default"}}); - CHECK(scope.metrics(0).name() == "root_test_metric"); - CHECK(scope.metrics(0).has_histogram()); - } } TEST_CASE("TopN metrics", "[metrics][topn]") @@ -391,7 +334,6 @@ TEST_CASE("TopN metrics", "[metrics][topn]") json j; std::stringstream output; - metrics::v1::ScopeMetrics scope; std::string line; TopN top_sting("root", "string", {"test", "metric"}, "A topn test metric"); TopN top_int("root", "integer", {"test", "metric"}, "A topn test metric"); @@ -416,6 +358,24 @@ TEST_CASE("TopN metrics", "[metrics][topn]") CHECK(j["top"]["test"]["metric"][0]["name"] == "123"); } + SECTION("TopN to json summary") + { + top_sting.update("top1"); + top_sting.update("top2"); + top_sting.update("top3"); + top_sting.update("none"); + top_sting.to_json(j["top"], [](const std::string &val) { + if(val.find("top") != 
std::string::npos) { + return std::string("top"); + } + return val; + }, Metric::Aggregate::SUMMARY); + CHECK(j["top"]["test"]["metric"][0]["estimate"] == 3); + CHECK(j["top"]["test"]["metric"][0]["name"] == "top"); + CHECK(j["top"]["test"]["metric"][1]["estimate"] == 1); + CHECK(j["top"]["test"]["metric"][1]["name"] == "none"); + } + SECTION("TopN prometheus") { top_sting.update("top1"); @@ -432,19 +392,6 @@ TEST_CASE("TopN metrics", "[metrics][topn]") CHECK(line == R"(root_test_metric{instance="test instance",policy="default",string="top2"} 1)"); } - SECTION("TopN opentelemetry") - { - top_sting.update("top1"); - top_sting.update("top2"); - top_sting.update("top1"); - timespec stamp; - top_sting.to_opentelemetry(scope, stamp, stamp, {{"policy", "default"}}); - CHECK(scope.metrics(0).name() == "root_test_metric"); - CHECK(scope.metrics(0).has_gauge()); - CHECK(scope.metrics_size() == 1); - CHECK(scope.metrics(0).gauge().data_points_size() == 2); - } - SECTION("TopN prometheus formatter") { top_int.update(123); @@ -462,18 +409,26 @@ TEST_CASE("TopN metrics", "[metrics][topn]") CHECK(line == R"(root_test_metric{instance="test instance",integer="10",policy="default"} 1)"); } - SECTION("TopN opentelemetry formatter") + SECTION("TopN to prometheus summary") { - top_int.update(123); - top_int.update(10); - top_int.update(123); - timespec stamp; - top_int.to_opentelemetry(scope, stamp, stamp, {{"policy", "default"}}, - [](const uint16_t &val) { return std::to_string(val); }); - CHECK(scope.metrics(0).name() == "root_test_metric"); - CHECK(scope.metrics(0).has_gauge()); - CHECK(scope.metrics_size() == 1); - CHECK(scope.metrics(0).gauge().data_points_size() == 2); + top_sting.update("top1"); + top_sting.update("top2"); + top_sting.update("top3"); + top_sting.update("none"); + top_sting.to_prometheus(output, {{"policy", "default"}}, [](const std::string &val) { + if(val.find("top") != std::string::npos) { + return std::string("top"); + } + return val; + }, 
Metric::Aggregate::SUMMARY); + std::getline(output, line); + CHECK(line == "# HELP root_test_metric A topn test metric"); + std::getline(output, line); + CHECK(line == "# TYPE root_test_metric gauge"); + std::getline(output, line); + CHECK(line == R"(root_test_metric{instance="test instance",policy="default",string="top"} 3)"); + std::getline(output, line); + CHECK(line == R"(root_test_metric{instance="test instance",policy="default",string="none"} 1)"); } SECTION("TopN get count size") @@ -503,7 +458,6 @@ TEST_CASE("Cardinality metrics", "[metrics][cardinality]") json j; std::stringstream output; - metrics::v1::ScopeMetrics scope; std::string line; Cardinality c("root", {"test", "metric"}, "A cardinality test metric"); @@ -531,16 +485,6 @@ TEST_CASE("Cardinality metrics", "[metrics][cardinality]") std::getline(output, line); CHECK(line == R"(root_test_metric{instance="test instance",policy="default"} 1)"); } - - SECTION("Cardinality opentelemetry") - { - c.update("metric"); - timespec stamp; - c.to_opentelemetry(scope, stamp, stamp, {{"policy", "default"}}); - CHECK(scope.metrics(0).name() == "root_test_metric"); - CHECK(scope.metrics(0).has_gauge()); - CHECK(scope.metrics_size() == 1); - } } TEST_CASE("Rate metrics", "[metrics][rate]") @@ -549,7 +493,6 @@ TEST_CASE("Rate metrics", "[metrics][rate]") json j; std::stringstream output; - metrics::v1::ScopeMetrics scope; std::string line; Rate r("root", {"test", "metric"}, "A rate test metric"); @@ -575,10 +518,4 @@ TEST_CASE("Rate metrics", "[metrics][rate]") { r.to_prometheus(output, {{"policy", "default"}}); } - - SECTION("rate opentelemetry") - { - timespec stamp; - r.to_opentelemetry(scope, stamp, stamp, {{"policy", "default"}}); - } }