Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
81 changes: 81 additions & 0 deletions .github/workflows/run-external-processing.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
# Reusable workflow: runs the EXTERNAL_PROCESSING scenario (Envoy +
# external-processing gRPC service) against the golang dummy weblog.
name: External-processing tests

on:
  workflow_call:
    inputs:
      binaries_artifact:
        description: "Artifact name containing the binaries to test"
        default: ''
        required: false
        type: string
      ci_environment:
        description: "Which CI environment is running the tests, used for FPD"
        default: 'custom'
        required: false
        type: string
      build_proxy_image:
        description: "Shall we build proxy image"
        default: false
        required: false
        type: boolean

env:
  REGISTRY: ghcr.io

jobs:
  external-processing:
    runs-on:
      group: "APM Larger Runners"

    env:
      SYSTEM_TESTS_REPORT_ENVIRONMENT: ${{ inputs.ci_environment }}
      SYSTEM_TESTS_REPORT_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          repository: 'DataDog/system-tests'
      - name: Install runner
        uses: ./.github/actions/install_runner

      - name: Log in to the Container registry
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login ${{ env.REGISTRY }} -u ${{ github.actor }} --password-stdin

      - name: Get binaries artifact
        # only download when the caller actually provided an artifact name
        if: ${{ inputs.binaries_artifact != '' }}
        uses: actions/download-artifact@v4
        with:
          name: ${{ inputs.binaries_artifact }}
          path: binaries/

      - name: Pull images
        uses: ./.github/actions/pull_images
        with:
          library: golang
          weblog: golang-dummy
          scenarios: '["EXTERNAL_PROCESSING"]'

      - name: Build proxy image
        if: inputs.build_proxy_image
        run: ./build.sh -i proxy

      - name: Build agent image
        # this id is referenced by the "Compress logs" step condition below
        id: build
        run: ./build.sh -i agent

      - name: Run EXTERNAL_PROCESSING scenario
        run: ./run.sh EXTERNAL_PROCESSING
        env:
          DD_API_KEY: ${{ secrets.DD_API_KEY }}

      - name: Compress logs
        id: compress_logs
        # run even when the scenario failed, as long as the images were built
        if: always() && steps.build.outcome == 'success'
        run: tar -czvf artifact.tar.gz $(ls | grep logs)
      - name: Upload artifact
        if: always() && steps.compress_logs.outcome == 'success'
        uses: actions/upload-artifact@v4
        with:
          name: logs_externalprocessing_golang_golang-dummy_${{ inputs.ci_environment }}
          path: artifact.tar.gz
11 changes: 11 additions & 0 deletions .github/workflows/system-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -143,3 +143,14 @@ jobs:
with:
library: ${{ inputs.library }}
weblogs: ${{ needs.compute_parameters.outputs.dockerssi_weblogs }}

  # External-processing tests: only relevant for golang, and only when the
  # caller provides pre-built binaries; skipped when the upstream parameter
  # computation produced an empty scenario list.
  external-processing:
    needs:
      - compute_parameters
    if: ${{ needs.compute_parameters.outputs.externalprocessing_scenarios != '[]' && inputs.library == 'golang' && inputs.binaries_artifact != ''}}
    uses: ./.github/workflows/run-external-processing.yml
    secrets: inherit
    with:
      build_proxy_image: ${{ inputs.build_proxy_image }}
      ci_environment: ${{ inputs.ci_environment }}
      binaries_artifact: ${{ inputs.binaries_artifact }}
17 changes: 17 additions & 0 deletions docs/scenarios/external_processing.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
```mermaid
flowchart LR
%% Nodes
A("Test runner")
B("Envoy")
C("External Processing")
D("HTTP app")
E("Proxy")
F("Agent")
G("Backend")

%% Edge connections between nodes
A --> B --> D
B --> C --> B
C --> E --> F --> G
  %% Traffic: Test runner -> Envoy -> HTTP app; traces flow from the External Processing service through the Proxy to the Agent and Backend
```
78 changes: 78 additions & 0 deletions tests/external_processing/envoy.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
# Envoy configuration for the EXTERNAL_PROCESSING scenario: a plain HTTP
# listener that routes everything to the weblog (http-app) while sending
# request/response data to an external-processing gRPC service (extproc).
static_resources:
  listeners:
    - name: listener_0
      address:
        socket_address:
          address: 0.0.0.0
          port_value: 80
      filter_chains:
        - filters:
            - name: envoy.filters.network.http_connection_manager
              typed_config:
                "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
                stat_prefix: ingress_http
                codec_type: AUTO
                route_config:
                  name: local_route
                  virtual_hosts:
                    - name: backend
                      # catch-all vhost: every request is routed to the weblog
                      domains:
                        - "*"
                      routes:
                        - match:
                            prefix: "/"
                          route:
                            cluster: web_service
                http_filters:
                  # ext_proc runs before the router, so the external service
                  # sees the request before it reaches the weblog
                  - name: envoy.filters.http.ext_proc
                    typed_config:
                      "@type": type.googleapis.com/envoy.extensions.filters.http.ext_proc.v3.ExternalProcessor
                      grpc_service:
                        envoy_grpc:
                          cluster_name: ext_proc_cluster
                        timeout: 0.25s
                  - name: envoy.filters.http.router
                    typed_config:
                      "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
  clusters:
    # upstream weblog application
    - name: web_service
      connect_timeout: 0.25s
      type: STRICT_DNS
      lb_policy: ROUND_ROBIN
      load_assignment:
        cluster_name: web_service
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address:
                      address: http-app
                      port_value: 8080

    # external-processing gRPC service, reached over TLS (h2 required)
    - name: ext_proc_cluster
      connect_timeout: 0.25s
      type: STRICT_DNS
      lb_policy: ROUND_ROBIN
      http2_protocol_options: {}
      transport_socket:
        name: envoy.transport_sockets.tls
        typed_config:
          "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
          sni: extproc
      load_assignment:
        cluster_name: ext_proc_cluster
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address:
                      address: extproc
                      port_value: 443

# used for health checking
# admin:
#   access_log_path: "/tmp/admin_access.log"
#   address:
#     socket_address:
#       address: 0.0.0.0
#       port_value: 9901
16 changes: 16 additions & 0 deletions tests/external_processing/test_external_processing.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
from utils import weblog, interfaces, scenarios, features


@features.not_reported  # POC only; reporting of results in FPD is still to be defined
@scenarios.external_processing
class Test_ExternalProcessing:
    """Smoke test: a request through Envoy's external-processing chain is
    traced, and the root span carries the expected http.url."""

    def setup_main(self):
        self.r = weblog.get("/mock", params={"status_code": 200})

    def test_main(self):
        response = self.r
        assert response.status_code == 200

        interfaces.library.assert_trace_exists(response)

        expected_url = "http://localhost:7777/mock?status_code=200"
        for _, span in interfaces.library.get_root_spans(request=response):
            assert span["meta"]["http.url"] == expected_url
3 changes: 3 additions & 0 deletions utils/_context/_scenarios/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from .auto_injection import InstallerAutoInjectionScenario
from .k8s_lib_injection import KubernetesScenario, WeblogInjectionScenario
from .docker_ssi import DockerSSIScenario
from .external_processing import ExternalProcessingScenario

update_environ_with_local_env()

Expand Down Expand Up @@ -696,6 +697,8 @@ def all_endtoend_scenarios(test_object):
scenario_groups=[ScenarioGroup.APPSEC],
)

external_processing = ExternalProcessingScenario("EXTERNAL_PROCESSING")


def get_all_scenarios() -> list[Scenario]:
result = []
Expand Down
2 changes: 2 additions & 0 deletions utils/_context/_scenarios/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ class ScenarioGroup(Enum):
ONBOARDING = "onboarding"
DOCKER_SSI = "docker-ssi"
ESSENTIALS = "essentials"
EXTERNAL_PROCESSING = "external-processing"


VALID_GITHUB_WORKFLOWS = {
Expand All @@ -34,6 +35,7 @@ class ScenarioGroup(Enum):
"parametric",
"testthetest",
"dockerssi",
"externalprocessing",
}


Expand Down
56 changes: 29 additions & 27 deletions utils/_context/_scenarios/endtoend.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,30 @@ def get_container_by_dd_integration_name(self, name):
return container
return None

    def _start_interfaces_watchdog(self, interfaces):
        """Watch each interface's log folder and feed created/modified files
        into that interface.

        :param interfaces: iterable of interface objects; each must expose
            ``ingest_file(path)`` and a ``_log_folder`` attribute naming the
            directory to watch.
        """

        class Event(FileSystemEventHandler):
            # watchdog handler bound to a single interface
            def __init__(self, interface) -> None:
                super().__init__()
                self.interface = interface

            def _ingest(self, event):
                # directory events carry no file to ingest
                if event.is_directory:
                    return

                self.interface.ingest_file(event.src_path)

            # both file creation and file modification trigger ingestion
            on_modified = _ingest
            on_created = _ingest

        # lot of issue using the default OS dependant notifiers (not working on WSL, reaching some inotify watcher
        # limits on Linux) -> using the good old bare polling system
        observer = PollingObserver()

        for interface in interfaces:
            observer.schedule(Event(interface), path=interface._log_folder)

        observer.start()

def get_warmups(self):
warmups = super().get_warmups()

Expand Down Expand Up @@ -306,41 +330,19 @@ def _create_interface_folders(self):
for container in self.buddies:
self._create_log_subfolder(f"interfaces/{container.interface.name}")

def _start_interface_watchdog(self):
def _start_interfaces_watchdog(self, _=None):
from utils import interfaces

class Event(FileSystemEventHandler):
def __init__(self, interface) -> None:
super().__init__()
self.interface = interface

def _ingest(self, event):
if event.is_directory:
return

self.interface.ingest_file(event.src_path)

on_modified = _ingest
on_created = _ingest

# lot of issue using the default OS dependant notifiers (not working on WSL, reaching some inotify watcher
# limits on Linux) -> using the good old bare polling system
observer = PollingObserver()

observer.schedule(Event(interfaces.library), path=f"{self.host_log_folder}/interfaces/library")
observer.schedule(Event(interfaces.agent), path=f"{self.host_log_folder}/interfaces/agent")

for container in self.buddies:
observer.schedule(Event(container.interface), path=container.interface._log_folder)

observer.start()
super()._start_interfaces_watchdog(
[interfaces.library, interfaces.agent] + [container.interface for container in self.buddies]
)

def get_warmups(self):
warmups = super().get_warmups()

if not self.replay:
warmups.insert(0, self._create_interface_folders)
warmups.insert(1, self._start_interface_watchdog)
warmups.insert(1, self._start_interfaces_watchdog)
warmups.append(self._get_weblog_system_info)
warmups.append(self._wait_for_app_readiness)

Expand Down
Loading