diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index bcacd5e1be3..78078f79358 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -50,7 +50,7 @@ pipeline { dir("${BASE_DIR}"){ setEnvVar('ONLY_DOCS', isGitRegionMatch(patterns: [ '.*\\.(asciidoc|md)' ], shouldMatchAll: true).toString()) setEnvVar('PACKAGING_CHANGES', isGitRegionMatch(patterns: [ '(^dev-tools/packaging/.*|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) - setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc)' ], shouldMatchAll: false).toString()) + setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) } } } @@ -79,7 +79,7 @@ pipeline { axes { axis { name 'PLATFORM' - values 'ubuntu-20.04 && immutable', 'aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'darwin && orka && x86_64' + values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'macos12 && x86_64' } } stages { @@ -164,7 +164,7 @@ pipeline { } } environment { - ARCH = "${PLATFORM.equals('aarch64') ? 'arm64' : 'amd64'}" + ARCH = "${PLATFORM.contains('aarch64') ? 'arm64' : 'amd64'}" DEV = true EXTERNAL = true } @@ -175,7 +175,7 @@ pipeline { withMageEnv(){ dir("${BASE_DIR}"){ withPackageEnv("${PLATFORM}") { - cmd(label: 'Go package', script: 'mage package') + cmd(label: 'Go package', script: 'mage package ironbank') uploadPackagesToGoogleBucket( credentialsId: env.JOB_GCS_EXT_CREDENTIALS, repo: env.REPO, @@ -219,7 +219,7 @@ pipeline { axes { axis { name 'K8S_VERSION' - values "v1.24.0, v1.23.6, v1.22.9, v1.21.12" + values "v1.24.0", "v1.23.6", "v1.22.9", "v1.21.12" } } stages { @@ -251,18 +251,23 @@ pipeline { } steps { // TODO: what's the testMatrixFile to be used if any - runE2E(testMatrixFile: '', + runE2E(testMatrixFile: '.ci/.e2e-tests-for-elastic-agent.yaml', beatVersion: "${env.BEAT_VERSION}-SNAPSHOT", elasticAgentVersion: "${env.BEAT_VERSION}-SNAPSHOT", gitHubCheckName: "e2e-tests", gitHubCheckRepo: env.REPO, - gitHubCheckSha1: env.GIT_BASE_COMMIT) + gitHubCheckSha1: env.GIT_BASE_COMMIT, + propagate: true, + wait: true) } } } post { cleanup { - notifyBuildResult(prComment: true) + notifyBuildResult(prComment: true, + analyzeFlakey: !isTag(), jobName: getFlakyJobName(withBranch: (isPR() ? 
env.CHANGE_TARGET : env.BRANCH_NAME)), + githubIssue: isBranch() && currentBuild.currentResult != "SUCCESS", + githubLabels: 'Team:Elastic-Agent-Control-Plane') } } } @@ -274,7 +279,7 @@ def isCodeCoverageEnabled() { def withPackageEnv(platform, Closure body) { if (isUnix()) { - if (platform.contains('macosx')) { + if (isDarwin()) { withPackageDarwinEnv() { body() } diff --git a/.ci/jobs/elastic-agent-mbp.yml b/.ci/jobs/elastic-agent-mbp.yml index f3772fd3855..8947d15880a 100644 --- a/.ci/jobs/elastic-agent-mbp.yml +++ b/.ci/jobs/elastic-agent-mbp.yml @@ -2,7 +2,7 @@ - job: name: "elastic-agent/elastic-agent-mbp" display-name: elastic-agent - description: "POC to isolate elastic agent from beats" + description: "Elastic agent" project-type: multibranch script-path: .ci/Jenkinsfile scm: @@ -12,6 +12,7 @@ discover-pr-forks-trust: permission discover-pr-origin: merge-current discover-tags: true + head-filter-regex: '(main|7\.17|8\.\d+|PR-.*|v\d+\.\d+\.\d+)' notification-context: 'fleet-ci' repo: elastic-agent repo-owner: elastic @@ -39,4 +40,4 @@ timeout: 100 timeout: '15' use-author: true - wipe-workspace: 'True' + wipe-workspace: true diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bc147bf0680..d8bc0072d7b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,4 @@ -# Team responsable for Fleet Server +# Team responsible for Fleet Server * @elastic/elastic-agent-control-plane + +/deploy/kubernetes @elastic/obs-cloudnative-monitoring diff --git a/.github/workflows/elastic-agent-project-board.yml b/.github/workflows/elastic-agent-project-board.yml index 1b296620b09..e6add0d093c 100644 --- a/.github/workflows/elastic-agent-project-board.yml +++ b/.github/workflows/elastic-agent-project-board.yml @@ -14,7 +14,7 @@ jobs: with: headers: '{"GraphQL-Features": "projects_next_graphql"}' query: | - mutation add_to_project($projectid:String!,$contentid:String!) { + mutation add_to_project($projectid:[ID!]!,$contentid:ID!) { updateIssue(input: {id:$contentid, projectIds:$projectid}) { clientMutationId } diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 79a22cbabc5..8079fe1c673 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -14,18 +14,10 @@ jobs: golangci: strategy: matrix: - include: - - GOOS: windows - - GOOS: linux - - GOOS: darwin + os: [ ubuntu-latest, macos-latest, windows-latest ] name: lint - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} steps: - - name: Echo details - env: - GOOS: ${{ matrix.GOOS }} - run: echo Go GOOS=$GOOS - - uses: actions/checkout@v2 # Uses Go version from the repository. 
@@ -38,8 +30,6 @@ jobs: go-version: "${{ steps.goversion.outputs.version }}" - name: golangci-lint - env: - GOOS: ${{ matrix.GOOS }} uses: golangci/golangci-lint-action@v2 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version diff --git a/.gitignore b/.gitignore index 3939307f99c..9940bf5068e 100644 --- a/.gitignore +++ b/.gitignore @@ -60,4 +60,3 @@ pkg/component/fake/fake # VSCode /.vscode - diff --git a/.mergify.yml b/.mergify.yml index 6e1e3c5f651..3fe46362854 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -181,55 +181,42 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.0 branch - conditions: - - merged - - label=backport-v8.0.0 - actions: - backport: - assignees: - - "{{ author }}" - branches: - - "8.0" - labels: - - "backport" - title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.1 branch + - name: backport patches to 8.2 branch conditions: - merged - - label=backport-v8.1.0 + - label=backport-v8.2.0 actions: backport: assignees: - "{{ author }}" branches: - - "8.1" + - "8.2" labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.2 branch + - name: backport patches to 8.3 branch conditions: - merged - - label=backport-v8.2.0 + - label=backport-v8.3.0 actions: backport: assignees: - "{{ author }}" branches: - - "8.2" + - "8.3" labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.3 branch + - name: backport patches to 8.4 branch conditions: - merged - - label=backport-v8.3.0 + - label=backport-v8.4.0 actions: backport: assignees: - "{{ author }}" branches: - - "8.3" + - "8.4" labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index fad7186655f..a2b19fb1a90 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -1,71 +1,74 @@ // Use these for links to issue and pulls. Note issues and pulls redirect one to // each other on Github, so don't worry too much on using the right prefix. -:issue: https://github.com/elastic/beats/issues/ -:pull: https://github.com/elastic/beats/pull/ +:issue-beats: https://github.com/elastic/beats/issues/ +:pull-beats: https://github.com/elastic/beats/pull/ + +:issue: https://github.com/elastic/elastic-agent/issues/ +:pull: https://github.com/elastic/elastic-agent/pull/ [[release-notes-7.9.0]] === Elastic Agent version 7.9.0 ==== Breaking changes -- Change fleet.yml structure, causes upgraded agent to register as new agent {pull}19248[19248] -- Remove obfuscation of fleet.yml, causes re-enroll of agent to Fleet {pull}19678[19678] -- Rename enroll --ca_sha256 to --ca-sha256 {pull}19900[19900] -- Rename enroll --certificate_authorities to --certificate-authorities {pull}19900[19900] -- Don't build 32 bits version of Elastic Agent. {issue}25533[25533] +- Change fleet.yml structure, causes upgraded agent to register as new agent {pull-beats}[19248] +- Remove obfuscation of fleet.yml, causes re-enroll of agent to Fleet {pull-beats}[19678] +- Rename enroll --ca_sha256 to --ca-sha256 {pull-beats}[19900] +- Rename enroll --certificate_authorities to --certificate-authorities {pull-beats}[19900] +- Don't build 32 bits version of Elastic Agent. 
{issue-beats}[25533] ==== Bugfixes -- Fix install service script for windows {pull}18814[18814] -- Properly stops subprocess on shutdown {pull}19567[19567] -- Forward revision number of the configuration to the endpoint. {pull}19759[19759] -- Remove support for logs type and use logfile {pull}19761[19761] -- Avoid comparing uncomparable types on enroll {issue}19976[19976] -- Fix issues with merging of elastic-agent.yml and fleet.yml {pull}20026[20026] -- Unzip failures on Windows 8/Windows server 2012 {pull}20088[20088] -- Fix failing unit tests on windows {pull}20127[20127] -- Prevent closing closed reader {pull}20214[20214] -- Improve GRPC stop to be more relaxed {pull}20118[20118] -- Fix Windows service installation script {pull}20203[20203] -- Fix timeout issue stopping service applications {pull}20256[20256] -- Fix incorrect hash when upgrading agent {pull}22322[22322] -- Fix refresh of monitoring configuration {pull}23619[23619] -- Fixed nil pointer during unenroll {pull}23609[23609] -- Fixed reenroll scenario {pull}23686[23686] -- Fixed Monitoring filebeat and metricbeat not connecting to Agent over GRPC {pull}23843[23843] -- Fixed make status readable in the log. {pull}23849[23849] -- Windows agent doesn't uninstall with a lowercase `c:` drive in the path {pull}23998[23998] -- Fix reloading of log level for services {pull}24055[24055] -- Fix: Successfully installed and enrolled agent running standalone{pull}24128[24128] -- Make installer atomic on windows {pull}24253[24253] -- Remove installed services on agent uninstall {pull}24151[24151] -- Fix failing installation on windows 7 {pull}24387[24387] -- Fix capabilities resolution in inspect command {pull}24346[24346] -- Fix windows installer during enroll {pull}24343[24343] -- Logging to file disabled on enroll {issue}24173[24173] -- Prevent uninstall failures on empty config {pull}24838[24838] -- Fix issue with FLEET_CA not being used with Fleet Server in container {pull}26529[26529] +- Fix install service script for windows {pull-beats}[18814] +- Properly stops subprocess on shutdown {pull-beats}[19567] +- Forward revision number of the configuration to the endpoint. {pull-beats}[19759] +- Remove support for logs type and use logfile {pull-beats}[19761] +- Avoid comparing uncomparable types on enroll {issue-beats}[19976] +- Fix issues with merging of elastic-agent.yml and fleet.yml {pull-beats}[20026] +- Unzip failures on Windows 8/Windows server 2012 {pull-beats}[20088] +- Fix failing unit tests on windows {pull-beats}[20127] +- Prevent closing closed reader {pull-beats}[20214] +- Improve GRPC stop to be more relaxed {pull-beats}[20118] +- Fix Windows service installation script {pull-beats}[20203] +- Fix timeout issue stopping service applications {pull-beats}[20256] +- Fix incorrect hash when upgrading agent {pull-beats}[22322] +- Fix refresh of monitoring configuration {pull-beats}[23619] +- Fixed nil pointer during unenroll {pull-beats}[23609] +- Fixed reenroll scenario {pull-beats}[23686] +- Fixed Monitoring filebeat and metricbeat not connecting to Agent over GRPC {pull-beats}[23843] +- Fixed make status readable in the log. 
{pull-beats}[23849] +- Windows agent doesn't uninstall with a lowercase `c:` drive in the path {pull-beats}[23998] +- Fix reloading of log level for services {pull-beats}[24055] +- Fix: Successfully installed and enrolled agent running standalone{pull-beats}[24128] +- Make installer atomic on windows {pull-beats}[24253] +- Remove installed services on agent uninstall {pull-beats}[24151] +- Fix failing installation on windows 7 {pull-beats}[24387] +- Fix capabilities resolution in inspect command {pull-beats}[24346] +- Fix windows installer during enroll {pull-beats}[24343] +- Logging to file disabled on enroll {issue-beats}[24173] +- Prevent uninstall failures on empty config {pull-beats}[24838] +- Fix issue with FLEET_CA not being used with Fleet Server in container {pull-beats}[26529] ==== New features -- Change monitoring defaults for agent {pull}18927[18927] -- Agent verifies packages before using them {pull}18876[18876] -- Change stream.* to dataset.* fields {pull}18967[18967] -- Agent now runs the GRPC server and spawned application connect by to Agent {pull}18973[18973] -- Rename input.type logs to logfile {pull}19360[19360] -- Agent now installs/uninstalls Elastic Endpoint {pull}19248[19248] -- Agent now downloads Elastic Endpoint {pull}19503[19503] -- Refuse invalid stream values in configuration {pull}19587[19587] -- Agent now load balances across multiple Kibana instances {pull}19628[19628] -- Configuration cleanup {pull}19848[19848] -- Agent now sends its own logs to elasticsearch {pull}19811[19811] -- Add --insecure option to enroll command {pull}19900[19900] -- Will retry to enroll if the server return a 429. {pull}19918[19811] -- Add --staging option to enroll command {pull}20026[20026] -- Add `event.dataset` to all events {pull}20076[20076] -- Send datastreams fields {pull}20416[20416] -- Agent supports capabilities definition {pull}23848[23848] -- Restart process on output change {pull}24907[24907] +- Change monitoring defaults for agent {pull-beats}[18927] +- Agent verifies packages before using them {pull-beats}[18876] +- Change stream.* to dataset.* fields {pull-beats}[18967] +- Agent now runs the GRPC server and spawned application connect by to Agent {pull-beats}[18973] +- Rename input.type logs to logfile {pull-beats}[19360] +- Agent now installs/uninstalls Elastic Endpoint {pull-beats}[19248] +- Agent now downloads Elastic Endpoint {pull-beats}[19503] +- Refuse invalid stream values in configuration {pull-beats}[19587] +- Agent now load balances across multiple Kibana instances {pull-beats}[19628] +- Configuration cleanup {pull-beats}[19848] +- Agent now sends its own logs to elasticsearch {pull-beats}[19811] +- Add --insecure option to enroll command {pull-beats}[19900] +- Will retry to enroll if the server return a 429. 
{pull-beats}[19811] +- Add --staging option to enroll command {pull-beats}[20026] +- Add `event.dataset` to all events {pull-beats}[20076] +- Send datastreams fields {pull-beats}[20416] +- Agent supports capabilities definition {pull-beats}[23848] +- Restart process on output change {pull-beats}[24907] === Docs @@ -75,61 +78,61 @@ === Elastic Agent version 7.8.0 ==== Breaking changes -- Rename agent to elastic-agent {pull}17391[17391] +- Rename agent to elastic-agent {pull-beats}[17391] ==== Bugfixes -- Fixed tests on windows {pull}16922[16922] -- Fixed installers for SNAPSHOTs and windows {pull}17077[17077] -- Fixed merge of config {pull}17399[17399] -- Handle abs paths on windows correctly {pull}17461[17461] -- Improved cancellation of agent {pull}17318[17318] -- Fixed process spawning on Windows {pull}17751[17751] -- Fix issues when running `mage package` for all the platforms. {pull}17767[17767] -- Rename the User-Agent string from Beats Agent to Elastic Agent. {pull}17765[17765] -- Remove the kbn-version on each request to the Kibana API. {pull}17764[17764] -- Fixed injected log path to monitoring beat {pull}17833[17833] -- Make sure that the Elastic Agent connect over TLS in cloud. {pull}17843[17843] -- Moved stream.* fields to top of event {pull}17858[17858] -- Use /tmp for default monitoring endpoint location for libbeat {pull}18131[18131] -- Use default output by default {pull}18091[18091] -- Fix panic and flaky tests for the Agent. {pull}18135[18135] -- Fix default configuration after enroll {pull}18232[18232] -- Fix make sure the collected logs or metrics include streams information. {pull}18261[18261] -- Fix version to 7.8 {pull}18286[18286] -- Fix an issue where the checkin_frequency, jitter, and backoff options where not configurable. {pull}17843[17843] -- Ensure that the beats uses the params prefer_v2_templates on bulk request. {pull}18318[18318] -- Stop monitoring on config change {pull}18284[18284] -- Enable more granular control of monitoring {pull}18346[18346] -- Fix jq: command not found {pull}18408[18408] -- Avoid Chown on windows {pull}18512[18512] -- Clean action store after enrolling to new configuration {pull}18656[18656] -- Avoid watching monitor logs {pull}18723[18723] -- Correctly report platform and family. {issue}18665[18665] -- Guard against empty stream.datasource and namespace {pull}18769[18769] -- Fix install service script for windows {pull}18814[18814] +- Fixed tests on windows {pull-beats}[16922] +- Fixed installers for SNAPSHOTs and windows {pull-beats}[17077] +- Fixed merge of config {pull-beats}[17399] +- Handle abs paths on windows correctly {pull-beats}[17461] +- Improved cancellation of agent {pull-beats}[17318] +- Fixed process spawning on Windows {pull-beats}[17751] +- Fix issues when running `mage package` for all the platforms. {pull-beats}[17767] +- Rename the User-Agent string from Beats Agent to Elastic Agent. {pull-beats}[17765] +- Remove the kbn-version on each request to the Kibana API. {pull-beats}[17764] +- Fixed injected log path to monitoring beat {pull-beats}[17833] +- Make sure that the Elastic Agent connect over TLS in cloud. {pull-beats}[17843] +- Moved stream.* fields to top of event {pull-beats}[17858] +- Use /tmp for default monitoring endpoint location for libbeat {pull-beats}[18131] +- Use default output by default {pull-beats}[18091] +- Fix panic and flaky tests for the Agent. 
{pull-beats}[18135] +- Fix default configuration after enroll {pull-beats}[18232] +- Fix make sure the collected logs or metrics include streams information. {pull-beats}[18261] +- Fix version to 7.8 {pull-beats}[18286] +- Fix an issue where the checkin_frequency, jitter, and backoff options where not configurable. {pull-beats}[17843] +- Ensure that the beats uses the params prefer_v2_templates on bulk request. {pull-beats}[18318] +- Stop monitoring on config change {pull-beats}[18284] +- Enable more granular control of monitoring {pull-beats}[18346] +- Fix jq: command not found {pull-beats}[18408] +- Avoid Chown on windows {pull-beats}[18512] +- Clean action store after enrolling to new configuration {pull-beats}[18656] +- Avoid watching monitor logs {pull-beats}[18723] +- Correctly report platform and family. {issue-beats}[18665] +- Guard against empty stream.datasource and namespace {pull-beats}[18769] +- Fix install service script for windows {pull-beats}[18814] ==== New features -- Generate index name in a format type-dataset-namespace {pull}16903[16903] -- OS agnostic default configuration {pull}17016[17016] -- Introduced post install hooks {pull}17241[17241] -- Support for config constraints {pull}17112[17112] -- Introduced `mage demo` command {pull}17312[17312] -- Display the stability of the agent at enroll and start. {pull}17336[17336] -- Expose stream.* variables in events {pull}17468[17468] -- Monitoring configuration reloadable {pull}17855[17855] -- Pack ECS metadata to request payload send to fleet {pull}17894[17894] -- Allow CLI overrides of paths {pull}17781[17781] -- Enable Filebeat input: S3, Azureeventhub, cloudfoundry, httpjson, netflow, o365audit. {pull}17909[17909] -- Configurable log level {pull}18083[18083] -- Use data subfolder as default for process logs {pull}17960[17960] -- Enable introspecting configuration {pull}18124[18124] -- Follow home path for all config files {pull}18161[18161] -- Do not require unnecessary configuration {pull}18003[18003] -- Use nested objects so fleet can handle metadata correctly {pull}18234[18234] -- Enable debug log level for Metricbeat and Filebeat when run under the Elastic Agent. {pull}17935[17935] -- Pick up version from libbeat {pull}18350[18350] -- More clear output of inspect command {pull}18405[18405] -- When not port are specified and the https is used fallback to 443 {pull}18844[18844] -- Basic upgrade process {pull}21002[21002] +- Generate index name in a format type-dataset-namespace {pull-beats}[16903] +- OS agnostic default configuration {pull-beats}[17016] +- Introduced post install hooks {pull-beats}[17241] +- Support for config constraints {pull-beats}[17112] +- Introduced `mage demo` command {pull-beats}[17312] +- Display the stability of the agent at enroll and start. {pull-beats}[17336] +- Expose stream.* variables in events {pull-beats}[17468] +- Monitoring configuration reloadable {pull-beats}[17855] +- Pack ECS metadata to request payload send to fleet {pull-beats}[17894] +- Allow CLI overrides of paths {pull-beats}[17781] +- Enable Filebeat input: S3, Azureeventhub, cloudfoundry, httpjson, netflow, o365audit. 
{pull-beats}[17909] +- Configurable log level {pull-beats}[18083] +- Use data subfolder as default for process logs {pull-beats}[17960] +- Enable introspecting configuration {pull-beats}[18124] +- Follow home path for all config files {pull-beats}[18161] +- Do not require unnecessary configuration {pull-beats}[18003] +- Use nested objects so fleet can handle metadata correctly {pull-beats}[18234] +- Enable debug log level for Metricbeat and Filebeat when run under the Elastic Agent. {pull-beats}[17935] +- Pick up version from libbeat {pull-beats}[18350] +- More clear output of inspect command {pull-beats}[18405] +- When not port are specified and the https is used fallback to 443 {pull-beats}[18844] +- Basic upgrade process {pull-beats}[21002] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2c3e563cf21..2361baf73f5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -1,179 +1,189 @@ // Use these for links to issue and pulls. Note issues and pulls redirect one to // each other on Github, so don't worry too much on using the right prefix. -:issue: https://github.com/elastic/beats/issues/ -:pull: https://github.com/elastic/beats/pull/ +:issue-beats: https://github.com/elastic/beats/issues/ +:pull-beats: https://github.com/elastic/beats/pull/ + +:issue: https://github.com/elastic/elastic-agent/issues/ +:pull: https://github.com/elastic/elastic-agent/pull/ === Elastic Agent version HEAD ==== Breaking changes -- Docker container is not run as root by default. {pull}21213[21213] -- Read Fleet connection information from `fleet.*` instead of `fleet.kibana.*`. {pull}24713[24713] -- Beats build for 32Bit Windows or Linux system will refuse to run on a 64bit system. {pull}25186[25186] -- Remove the `--kibana-url` from `install` and `enroll` command. {pull}25529[25529] -- Default to port 80 and 443 for Kibana and Fleet Server connections. {pull}25723[25723] -- Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull}28006[28006] -- The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull}28165[28165] -- Remove username/password for fleet-server authentication. {pull}29458[29458] +- Docker container is not run as root by default. {pull-beats}[21213] +- Read Fleet connection information from `fleet.*` instead of `fleet.kibana.*`. {pull-beats}[24713] +- Beats build for 32Bit Windows or Linux system will refuse to run on a 64bit system. {pull-beats}[25186] +- Remove the `--kibana-url` from `install` and `enroll` command. {pull-beats}[25529] +- Default to port 80 and 443 for Kibana and Fleet Server connections. {pull-beats}[25723] +- Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull-beats}[28006] +- The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull-beats}[28165] +- Remove username/password for fleet-server authentication. {pull-beats}[29458] ==== Bugfixes -- Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. 
{pull}20779[20779] -- Thread safe sorted set {pull}21290[21290] -- Copy Action store on upgrade {pull}21298[21298] -- Include inputs in action store actions {pull}21298[21298] -- Fix issue where inputs without processors defined would panic {pull}21628[21628] -- Prevent reporting ecs version twice {pull}21616[21616] -- Partial extracted beat result in failure to spawn beat {issue}21718[21718] -- Use symlink path for reexecutions {pull}21835[21835] -- Use ML_SYSTEM to detect if agent is running as a service {pull}21884[21884] -- Use local temp instead of system one {pull}21883[21883] -- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] -- Fix issue with named pipes on Windows 7 {pull}21931[21931] -- Fix missing elastic_agent event data {pull}21994[21994] -- Ensure shell wrapper path exists before writing wrapper on install {pull}22144[22144] -- Fix deb/rpm packaging for Elastic Agent {pull}22153[22153] -- Fix composable input processor promotion to fix duplicates {pull}22344[22344] -- Fix sysv init files for deb/rpm installation {pull}22543[22543] -- Fix shell wrapper for deb/rpm packaging {pull}23038[23038] -- Fixed parsing of npipe URI {pull}22978[22978] -- Select default agent policy if no enrollment token provided. {pull}23973[23973] -- Remove artifacts on transient download errors {pull}23235[23235] -- Support for linux/arm64 {pull}23479[23479] -- Skip top level files when unziping archive during upgrade {pull}23456[23456] -- Do not take ownership of Endpoint log path {pull}23444[23444] -- Fixed fetching DBus service PID {pull}23496[23496] -- Fix issue of missing log messages from filebeat monitor {pull}23514[23514] -- Increase checkin grace period to 30 seconds {pull}23568[23568] -- Fix libbeat from reporting back degraded on config update {pull}23537[23537] -- Rewrite check if agent is running with admin rights on Windows {pull}23970[23970] -- Fix issues with dynamic inputs and conditions {pull}23886[23886] -- Fix bad substitution of API key. {pull}24036[24036] -- Fix docker enrollment issue related to Fleet Server change. {pull}24155[24155] -- Improve log on failure of Endpoint Security installation. {pull}24429[24429] -- Verify communication to Kibana before updating Fleet client. {pull}24489[24489] -- Fix nil pointer when null is generated as list item. {issue}23734[23734] -- Add support for filestream input. {pull}24820[24820] -- Add check for URL set when cert and cert key. 
{pull}24904[24904] -- Fix install command for Fleet Server bootstrap, remove need for --enrollment-token when using --fleet-server {pull}24981[24981] -- Respect host configuration for exposed processes endpoint {pull}25114[25114] -- Set --inscure in container when FLEET_SERVER_ENABLE and FLEET_INSECURE set {pull}25137[25137] -- Fixed: limit for retries to Kibana configurable {issue}25063[25063] -- Fix issue with status and inspect inside of container {pull}25204[25204] -- Remove FLEET_SERVER_POLICY_NAME env variable as it was not used {pull}25149[25149] -- Reduce log level for listener cleanup to debug {pull}25274 -- Passing in policy id to container command works {pull}25352[25352] -- Reduce log level for listener cleanup to debug {pull}25274[25274] -- Delay the restart of application when a status report of failure is given {pull}25339[25339] -- Don't log when upgrade capability doesn't apply {pull}25386[25386] -- Fixed issue when unversioned home is set and invoked watcher failing with ENOENT {issue}25371[25371] -- Fixed Elastic Agent: expecting Dict and received *transpiler.Key for '0' {issue}24453[24453] -- Fix AckBatch to do nothing when no actions passed {pull}25562[25562] -- Add error log entry when listener creation fails {issue}23483[23482] -- Handle case where policy doesn't contain Fleet connection information {pull}25707[25707] -- Fix fleet-server.yml spec to not overwrite existing keys {pull}25741[25741] -- Agent sends wrong log level to Endpoint {issue}25583[25583] -- Fix startup with failing configuration {pull}26057[26057] -- Change timestamp in elatic-agent-json.log to use UTC {issue}25391[25391] -- Fix add support for Logstash output. {pull}24305[24305] -- Do not log Elasticsearch configuration for monitoring output when running with debug. {pull}26583[26583] -- Fix issue where proxy enrollment options broke enrollment command. {pull}26749[26749] -- Remove symlink.prev from previously failed upgrade {pull}26785[26785] -- Fix apm-server supported outputs not being in sync with supported output types. {pull}26885[26885] -- Set permissions during installation {pull}26665[26665] -- Disable monitoring during fleet-server bootstrapping. {pull}27222[27222] -- Fix issue with atomic extract running in K8s {pull}27396[27396] -- Fix issue with install directory in state path in K8s {pull}27396[27396] -- Disable monitoring during fleet-server bootstrapping. {pull}27222[27222] -- Change output.elasticsearch.proxy_disabled flag to output.elasticsearch.proxy_disable so fleet uses it. {issue}27670[27670] {pull}27671[27671] -- Add validation for certificate flags to ensure they are absolute paths. {pull}27779[27779] -- Migrate state on upgrade {pull}27825[27825] -- Add "_monitoring" suffix to monitoring instance names to remove ambiguity with the status command. {issue}25449[25449] -- Ignore ErrNotExists when fixing permissions. {issue}27836[27836] {pull}27846[27846] -- Snapshot artifact lookup will use agent.download proxy settings. {issue}27903[27903] {pull}27904[27904] -- Fix lazy acker to only add new actions to the batch. {pull}27981[27981] -- Allow HTTP metrics to run in bootstrap mode. Add ability to adjust timeouts for Fleet Server. {pull}28260[28260] -- Fix agent configuration overwritten by default fleet config. {pull}29297[29297] -- Allow agent containers to use basic auth to create a service token. {pull}29651[29651] -- Fix issue where a failing artifact verification does not remove the bad artifact. 
{pull}30281[30281] -- Reduce Elastic Agent shut down time by stopping processes concurrently {pull}29650[29650] +- Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. {pull-beats}[20779] +- Thread safe sorted set {pull-beats}[21290] +- Copy Action store on upgrade {pull-beats}[21298] +- Include inputs in action store actions {pull-beats}[21298] +- Fix issue where inputs without processors defined would panic {pull-beats}[21628] +- Prevent reporting ecs version twice {pull-beats}[21616] +- Partial extracted beat result in failure to spawn beat {issue-beats}[21718] +- Use symlink path for reexecutions {pull-beats}[21835] +- Use ML_SYSTEM to detect if agent is running as a service {pull-beats}[21884] +- Use local temp instead of system one {pull-beats}[21883] +- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull-beats}[21932] +- Fix issue with named pipes on Windows 7 {pull-beats}[21931] +- Fix missing elastic_agent event data {pull-beats}[21994] +- Ensure shell wrapper path exists before writing wrapper on install {pull-beats}[22144] +- Fix deb/rpm packaging for Elastic Agent {pull-beats}[22153] +- Fix composable input processor promotion to fix duplicates {pull-beats}[22344] +- Fix sysv init files for deb/rpm installation {pull-beats}[22543] +- Fix shell wrapper for deb/rpm packaging {pull-beats}[23038] +- Fixed parsing of npipe URI {pull-beats}[22978] +- Select default agent policy if no enrollment token provided. {pull-beats}[23973] +- Remove artifacts on transient download errors {pull-beats}[23235] +- Support for linux/arm64 {pull-beats}[23479] +- Skip top level files when unziping archive during upgrade {pull-beats}[23456] +- Do not take ownership of Endpoint log path {pull-beats}[23444] +- Fixed fetching DBus service PID {pull-beats}[23496] +- Fix issue of missing log messages from filebeat monitor {pull-beats}[23514] +- Increase checkin grace period to 30 seconds {pull-beats}[23568] +- Fix libbeat from reporting back degraded on config update {pull-beats}[23537] +- Rewrite check if agent is running with admin rights on Windows {pull-beats}[23970] +- Fix issues with dynamic inputs and conditions {pull-beats}[23886] +- Fix bad substitution of API key. {pull-beats}[24036] +- Fix docker enrollment issue related to Fleet Server change. {pull-beats}[24155] +- Improve log on failure of Endpoint Security installation. {pull-beats}[24429] +- Verify communication to Kibana before updating Fleet client. {pull-beats}[24489] +- Fix nil pointer when null is generated as list item. {issue-beats}[23734] +- Add support for filestream input. {pull-beats}[24820] +- Add check for URL set when cert and cert key. 
{pull-beats}[24904] +- Fix install command for Fleet Server bootstrap, remove need for --enrollment-token when using --fleet-server {pull-beats}[24981] +- Respect host configuration for exposed processes endpoint {pull-beats}[25114] +- Set --inscure in container when FLEET_SERVER_ENABLE and FLEET_INSECURE set {pull-beats}[25137] +- Fixed: limit for retries to Kibana configurable {issue-beats}[25063] +- Fix issue with status and inspect inside of container {pull-beats}[25204] +- Remove FLEET_SERVER_POLICY_NAME env variable as it was not used {pull-beats}[25149] +- Reduce log level for listener cleanup to debug {pull-beats} +- Passing in policy id to container command works {pull-beats}[25352] +- Reduce log level for listener cleanup to debug {pull-beats}[25274] +- Delay the restart of application when a status report of failure is given {pull-beats}[25339] +- Don't log when upgrade capability doesn't apply {pull-beats}[25386] +- Fixed issue when unversioned home is set and invoked watcher failing with ENOENT {issue-beats}[25371] +- Fixed Elastic Agent: expecting Dict and received *transpiler.Key for '0' {issue-beats}[24453] +- Fix AckBatch to do nothing when no actions passed {pull-beats}[25562] +- Add error log entry when listener creation fails {issue-beats}[23482] +- Handle case where policy doesn't contain Fleet connection information {pull-beats}[25707] +- Fix fleet-server.yml spec to not overwrite existing keys {pull-beats}[25741] +- Agent sends wrong log level to Endpoint {issue-beats}[25583] +- Fix startup with failing configuration {pull-beats}[26057] +- Change timestamp in elatic-agent-json.log to use UTC {issue-beats}[25391] +- Fix add support for Logstash output. {pull-beats}[24305] +- Do not log Elasticsearch configuration for monitoring output when running with debug. {pull-beats}[26583] +- Fix issue where proxy enrollment options broke enrollment command. {pull-beats}[26749] +- Remove symlink.prev from previously failed upgrade {pull-beats}[26785] +- Fix apm-server supported outputs not being in sync with supported output types. {pull-beats}[26885] +- Set permissions during installation {pull-beats}[26665] +- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] +- Fix issue with atomic extract running in K8s {pull-beats}[27396] +- Fix issue with install directory in state path in K8s {pull-beats}[27396] +- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] +- Change output.elasticsearch.proxy_disabled flag to output.elasticsearch.proxy_disable so fleet uses it. {issue-beats}[27670] {pull-beats}[27671] +- Add validation for certificate flags to ensure they are absolute paths. {pull-beats}[27779] +- Migrate state on upgrade {pull-beats}[27825] +- Add "_monitoring" suffix to monitoring instance names to remove ambiguity with the status command. {issue-beats}[25449] +- Ignore ErrNotExists when fixing permissions. {issue-beats}[27836] {pull-beats}[27846] +- Snapshot artifact lookup will use agent.download proxy settings. {issue-beats}[27903] {pull-beats}[27904] +- Fix lazy acker to only add new actions to the batch. {pull-beats}[27981] +- Allow HTTP metrics to run in bootstrap mode. Add ability to adjust timeouts for Fleet Server. {pull-beats}[28260] +- Fix agent configuration overwritten by default fleet config. {pull-beats}[29297] +- Allow agent containers to use basic auth to create a service token. {pull-beats}[29651] +- Fix issue where a failing artifact verification does not remove the bad artifact. 
{pull-beats}[30281] +- Reduce Elastic Agent shut down time by stopping processes concurrently {pull-beats}[29650] - Move `context cancelled` error from fleet gateway into debug level. {pull}187[187] - Update library containerd to 1.5.10. {pull}186[186] - Add fleet-server to output of elastic-agent inspect output command (and diagnostic bundle). {pull}243[243] - Update API calls that the agent makes to Kibana when running the container command. {pull}253[253] - diagnostics collect log names are fixed on Windows machines, command will ignore failures. AgentID is included in diagnostics(and diagnostics collect) output. {issue}81[81] {issue}92[92] {issue}190[190] {pull}262[262] - Collects stdout and stderr of applications run as a process and logs them. {issue}[88] +- Remove VerificationMode option to empty string. Default value is `full`. {issue}[184] +- diagnostics collect file mod times are set. {pull}570[570] +- Allow ':' characters in dynamic variables {issue}624[624] {pull}680[680] +- Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] +- Allow the / char in variable names in eql and transpiler. {issue}715[715] {pull}718[718] +- Fix data duplication for standalone agent on Kubernetes using the default manifest {issue-beats}31512[31512] {pull}742[742] +- Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] ==== New features -- Prepare packaging for endpoint and asc files {pull}20186[20186] -- Improved version CLI {pull}20359[20359] -- Enroll CLI now restarts running daemon {pull}20359[20359] -- Add restart CLI cmd {pull}20359[20359] -- Add new `synthetics/*` inputs to run Heartbeat {pull}20387[20387] -- Users of the Docker image can now pass `FLEET_ENROLL_INSECURE=1` to include the `--insecure` flag with the `elastic-agent enroll` command {issue}20312[20312] {pull}20713[20713] -- Add `docker` composable dynamic provider. {pull}20842[20842] -- Add support for dynamic inputs with providers and `{{variable|"default"}}` substitution. {pull}20839[20839] -- Add support for EQL based condition on inputs {pull}20994[20994] -- Send `fleet.host.id` to Endpoint Security {pull}21042[21042] -- Add `install` and `uninstall` subcommands {pull}21206[21206] -- Use new form of fleet API paths {pull}21478[21478] -- Add `kubernetes` composable dynamic provider. 
{pull}21480[21480] -- Send updating state {pull}21461[21461] -- Add `elastic.agent.id` and `elastic.agent.version` to published events from filebeat and metricbeat {pull}21543[21543] -- Add `upgrade` subcommand to perform upgrade of installed Elastic Agent {pull}21425[21425] -- Update `fleet.yml` and Kibana hosts when a policy change updates the Kibana hosts {pull}21599[21599] -- Update `install` command to perform enroll before starting Elastic Agent {pull}21772[21772] -- Update `fleet.kibana.path` from a POLICY_CHANGE {pull}21804[21804] -- Removed `install-service.ps1` and `uninstall-service.ps1` from Windows .zip packaging {pull}21694[21694] -- Add `priority` to `AddOrUpdate` on dynamic composable input providers communication channel {pull}22352[22352] -- Ship `endpoint-security` logs to elasticsearch {pull}22526[22526] -- Log level reloadable from fleet {pull}22690[22690] -- Push log level downstream {pull}22815[22815] -- Add metrics collection for Agent {pull}22793[22793] -- Add support for Fleet Server {pull}23736[23736] -- Add support for enrollment with local bootstrap of Fleet Server {pull}23865[23865] -- Add TLS support for Fleet Server {pull}24142[24142] -- Add support for Fleet Server running under Elastic Agent {pull}24220[24220] -- Add CA support to Elastic Agent docker image {pull}24486[24486] -- Add k8s secrets provider for Agent {pull}24789[24789] -- Add STATE_PATH, CONFIG_PATH, LOGS_PATH to Elastic Agent docker image {pull}24817[24817] -- Add status subcommand {pull}24856[24856] -- Add leader_election provider for k8s {pull}24267[24267] -- Add --fleet-server-service-token and FLEET_SERVER_SERVICE_TOKEN options {pull}25083[25083] -- Keep http and logging config during enroll {pull}25132[25132] -- Log output of container to $LOGS_PATH/elastic-agent-start.log when LOGS_PATH set {pull}25150[25150] -- Use `filestream` input for internal log collection. {pull}25660[25660] -- Enable agent to send custom headers to kibana/ES {pull}26275[26275] -- Set `agent.id` to the Fleet Agent ID in events published from inputs backed by Beats. {issue}21121[21121] {pull}26394[26394] {pull}26548[26548] -- Add proxy support to artifact downloader and communication with fleet server. {pull}25219[25219] -- Add proxy support to enroll command. {pull}26514[26514] -- Enable configuring monitoring namespace {issue}26439[26439] -- Communicate with Fleet Server over HTTP2. {pull}26474[26474] -- Pass logging.metrics.enabled to beats to stop beats from adding metrics into their logs. {issue}26758[26758] {pull}26828[26828] -- Support Node and Service autodiscovery in kubernetes dynamic provider. {pull}26801[26801] -- Increase Agent's mem limits in k8s. {pull}27153[27153] -- Add new --enroll-delay option for install and enroll commands. {pull}27118[27118] -- Add link to troubleshooting guide on fatal exits. {issue}26367[26367] {pull}27236[27236] -- Agent now adapts the beats queue size based on output settings. {issue}26638[26638] {pull}27429[27429] -- Support ephemeral containers in Kubernetes dynamic provider. {issue}27020[#27020] {pull}27707[27707] -- Add complete k8s metadata through composable provider. {pull}27691[27691] -- Add diagnostics command to gather beat metadata. {pull}28265[28265] -- Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull}28461[28461] -- Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull}28096[28096] -- Enable pprof endpoints for beats processes. 
Allow pprof endpoints for elastic-agent if enabled. {pull}28983[28983] -- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull}28798[28798] -- Allow pprof endpoints for elastic-agent or beats if enabled. {pull}28983[28983] {pull}29155[29155] -- Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull}29128[29128] -- Discover changes in Kubernetes nodes metadata as soon as they happen. {pull}23139[23139] -- Add results of inspect output command into archive produced by diagnostics collect. {pull}29902[29902] -- Add support for loading input configuration from external configuration files in standalone mode. You can load inputs from YAML configuration files under the folder `{path.config}/inputs.d`. {pull}30087[30087] -- Install command will skip install/uninstall steps when installation via package is detected on Linux distros. {pull}30289[30289] -- Update docker/distribution dependency library to fix a security issues concerning OCI Manifest Type Confusion Issue. {pull}30462[30462] -- Add action_input_type for the .fleet-actions-results {pull}30562[30562] -- Add support for enabling the metrics buffer endpoint in the elastic-agent and beats it runs. diagnostics collect command will gather metrics-buffer data if enabled. {pull}30471[30471] +- Prepare packaging for endpoint and asc files {pull-beats}[20186] +- Improved version CLI {pull-beats}[20359] +- Enroll CLI now restarts running daemon {pull-beats}[20359] +- Add restart CLI cmd {pull-beats}[20359] +- Add new `synthetics/*` inputs to run Heartbeat {pull-beats}[20387] +- Users of the Docker image can now pass `FLEET_ENROLL_INSECURE=1` to include the `--insecure` flag with the `elastic-agent enroll` command {issue-beats}[20312] {pull-beats}[20713] +- Add `docker` composable dynamic provider. {pull-beats}[20842] +- Add support for dynamic inputs with providers and `{{variable|"default"}}` substitution. {pull-beats}[20839] +- Add support for EQL based condition on inputs {pull-beats}[20994] +- Send `fleet.host.id` to Endpoint Security {pull-beats}[21042] +- Add `install` and `uninstall` subcommands {pull-beats}[21206] +- Use new form of fleet API paths {pull-beats}[21478] +- Add `kubernetes` composable dynamic provider. 
{pull-beats}[21480] +- Send updating state {pull-beats}[21461] +- Add `elastic.agent.id` and `elastic.agent.version` to published events from filebeat and metricbeat {pull-beats}[21543] +- Add `upgrade` subcommand to perform upgrade of installed Elastic Agent {pull-beats}[21425] +- Update `fleet.yml` and Kibana hosts when a policy change updates the Kibana hosts {pull-beats}[21599] +- Update `install` command to perform enroll before starting Elastic Agent {pull-beats}[21772] +- Update `fleet.kibana.path` from a POLICY_CHANGE {pull-beats}[21804] +- Removed `install-service.ps1` and `uninstall-service.ps1` from Windows .zip packaging {pull-beats}[21694] +- Add `priority` to `AddOrUpdate` on dynamic composable input providers communication channel {pull-beats}[22352] +- Ship `endpoint-security` logs to elasticsearch {pull-beats}[22526] +- Log level reloadable from fleet {pull-beats}[22690] +- Push log level downstream {pull-beats}[22815] +- Add metrics collection for Agent {pull-beats}[22793] +- Add support for Fleet Server {pull-beats}[23736] +- Add support for enrollment with local bootstrap of Fleet Server {pull-beats}[23865] +- Add TLS support for Fleet Server {pull-beats}[24142] +- Add support for Fleet Server running under Elastic Agent {pull-beats}[24220] +- Add CA support to Elastic Agent docker image {pull-beats}[24486] +- Add k8s secrets provider for Agent {pull-beats}[24789] +- Add STATE_PATH, CONFIG_PATH, LOGS_PATH to Elastic Agent docker image {pull-beats}[24817] +- Add status subcommand {pull-beats}[24856] +- Add leader_election provider for k8s {pull-beats}[24267] +- Add --fleet-server-service-token and FLEET_SERVER_SERVICE_TOKEN options {pull-beats}[25083] +- Keep http and logging config during enroll {pull-beats}[25132] +- Log output of container to $LOGS_PATH/elastic-agent-start.log when LOGS_PATH set {pull-beats}[25150] +- Use `filestream` input for internal log collection. {pull-beats}[25660] +- Enable agent to send custom headers to kibana/ES {pull-beats}[26275] +- Set `agent.id` to the Fleet Agent ID in events published from inputs backed by Beats. {issue-beats}[21121] {pull-beats}[26394] {pull-beats}[26548] +- Add proxy support to artifact downloader and communication with fleet server. {pull-beats}[25219] +- Add proxy support to enroll command. {pull-beats}[26514] +- Enable configuring monitoring namespace {issue-beats}[26439] +- Communicate with Fleet Server over HTTP2. {pull-beats}[26474] +- Pass logging.metrics.enabled to beats to stop beats from adding metrics into their logs. {issue-beats}[26758] {pull-beats}[26828] +- Support Node and Service autodiscovery in kubernetes dynamic provider. {pull-beats}[26801] +- Increase Agent's mem limits in k8s. {pull-beats}[27153] +- Add new --enroll-delay option for install and enroll commands. {pull-beats}[27118] +- Add link to troubleshooting guide on fatal exits. {issue-beats}[26367] {pull-beats}[27236] +- Agent now adapts the beats queue size based on output settings. {issue-beats}[26638] {pull-beats}[27429] +- Support ephemeral containers in Kubernetes dynamic provider. {issue-beats}[#27020] {pull-beats}[27707] +- Add complete k8s metadata through composable provider. {pull-beats}[27691] +- Add diagnostics command to gather beat metadata. {pull-beats}[28265] +- Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull-beats}[28461] +- Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull-beats}[28096] +- Enable pprof endpoints for beats processes. 
Allow pprof endpoints for elastic-agent if enabled. {pull-beats}[28983] +- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull-beats}[28798] +- Allow pprof endpoints for elastic-agent or beats if enabled. {pull-beats}[28983] {pull-beats}[29155] +- Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull-beats}[29128] +- Discover changes in Kubernetes nodes metadata as soon as they happen. {pull-beats}[23139] +- Add results of inspect output command into archive produced by diagnostics collect. {pull-beats}[29902] +- Add support for loading input configuration from external configuration files in standalone mode. You can load inputs from YAML configuration files under the folder `{path.config}/inputs.d`. {pull-beats}[30087] +- Install command will skip install/uninstall steps when installation via package is detected on Linux distros. {pull-beats}[30289] +- Update docker/distribution dependency library to fix a security issues concerning OCI Manifest Type Confusion Issue. {pull-beats}[30462] +- Add action_input_type for the .fleet-actions-results {pull-beats}[30562] +- Add support for enabling the metrics buffer endpoint in the elastic-agent and beats it runs. diagnostics collect command will gather metrics-buffer data if enabled. {pull-beats}[30471] - Update ack response schema and processing, add retrier for acks {pull}200[200] - Enhance error messages and logs for process start {pull}225[225] -- Changed the default policy selection logic. When the agent has no policy id or name defined, it will fall back to defaults (defined by $FLEET_SERVER_POLICY_ID and $FLEET_DEFAULT_TOKEN_POLICY_NAME environment variables respectively). {issue}29774[29774] {pull}226[226] +- Changed the default policy selection logic. When the agent has no policy id or name defined, it will fall back to defaults (defined by $FLEET_SERVER_POLICY_ID and $FLEET_DEFAULT_TOKEN_POLICY_NAME environment variables respectively). {issue-beats}[29774] {pull}226[226] - Add Elastic APM instrumentation {pull}180[180] - Agent can be built for `darwin/arm64`. When it's built for both `darwin/arm64` and `darwin/adm64` a universal binary is also built and packaged. {pull}203[203] - Add support for Cloudbeat. {pull}179[179] @@ -183,3 +193,7 @@ - Save the agent configuration and the state encrypted on the disk. {issue}535[535] {pull}398[398] - Bump node.js version for heartbeat/synthetics to 16.15.0 - Support scheduled actions and cancellation of pending actions. {issue}393[393] {pull}419[419] +- Add `@metadata.input_id` and `@metadata.stream_id` when applying the inject stream processor {pull}527[527] +- Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569] +- Redact sensitive information on diagnostics collect command. {issue}[241] {pull}[566] +- Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. 
{issue}[427] {pull}[700] diff --git a/NOTICE.txt b/NOTICE.txt index 56f82316620..1451a7531c1 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1061,11 +1061,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.2.3 +Version: v0.2.6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.6/LICENSE: Apache License Version 2.0, January 2004 @@ -6576,11 +6576,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/containerd/containerd -Version: v1.5.10 +Version: v1.5.13 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/containerd/containerd@v1.5.10/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/containerd/containerd@v1.5.13/LICENSE: Apache License @@ -14276,11 +14276,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : go.uber.org/goleak -Version: v1.1.11 +Version: v1.1.12 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/goleak@v1.1.11/LICENSE: +Contents of probable licence file $GOMODCACHE/go.uber.org/goleak@v1.1.12/LICENSE: The MIT License (MIT) @@ -15343,11 +15343,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : gopkg.in/yaml.v3 -Version: v3.0.0-20210107192922-496545a6307b +Version: v3.0.1 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.0-20210107192922-496545a6307b/LICENSE: +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.1/LICENSE: This project is covered by two different licenses: MIT and Apache. diff --git a/README.md b/README.md index 25aff95042e..2c0dbe31f69 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,13 @@ Prerequisites: - [Docker](https://docs.docker.com/get-docker/) - [X-pack](https://github.com/elastic/beats/tree/main/x-pack) to pre-exist in the parent folder of the local Git repository checkout +If you are on a Mac with M1 chip, don't forget to export some docker variable to be able to build for AMD +``` +export DOCKER_BUILDKIT=0 +export COMPOSE_DOCKER_CLI_BUILD=0 +export DOCKER_DEFAULT_PLATFORM=linux/amd64 +``` + In Linux operating systems that you can not run docker as a root user you need to follow [linux-postinstall steps](https://docs.docker.com/engine/install/linux-postinstall/) ### Testing docker container @@ -17,7 +24,7 @@ In Linux operating systems that you can not run docker as a root user you need t Running Elastic Agent in a docker container is a common use case. 
To build the Elastic Agent and create a docker image run the following command: ``` -DEV=true SNAPSHOT=true PLATFORMS=linux/amd64 TYPES=docker mage package +DEV=true SNAPSHOT=true PLATFORMS=linux/amd64 PACKAGES=docker mage package ``` If you are in the 7.13 branch, this will create the `docker.elastic.co/beats/elastic-agent:7.13.0-SNAPSHOT` image in your local environment. Now you can use this to for example test this container with the stack in elastic-package: @@ -45,7 +52,7 @@ for the standard variant. 1. Build elastic-agent: ```bash -DEV=true PLATFORMS=linux/amd64 TYPES=docker mage package +DEV=true PLATFORMS=linux/amd64 PACKAGES=docker mage package ``` Use environmental variables `GOHOSTOS` and `GOHOSTARCH` to specify PLATFORMS variable accordingly. eg. diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 882e7b46e21..1e2403f47a2 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -14,31 +15,41 @@ spec: labels: app: elastic-agent spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent hostNetwork: true + # 'hostPID: true' enables the Elastic Security integration to observe all process exec events on the host. + # Sharing the host process ID namespace gives visibility of all processes running on the same host. + hostPID: true dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent image: docker.elastic.co/beats/elastic-agent:8.3.0 env: + # Set to 1 for enrollment into Fleet server. 
If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL value: "1" - # Set to true in case of insecure or unverified HTTP + # Set to true to communicate with Fleet with either insecure HTTP or unverified HTTPS - name: FLEET_INSECURE value: "true" - # The ip:port pair of fleet server + # Fleet Server URL to enroll the Elastic Agent into + # FLEET_URL can be found in Kibana, go to Management > Fleet > Settings - name: FLEET_URL value: "https://fleet-server:8220" - # If left empty KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed + # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) + # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN value: "" - name: KIBANA_HOST value: "http://kibana:5601" + # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_USERNAME value: "elastic" + # The basic authentication password used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_PASSWORD value: "changeme" - name: NODE_NAME @@ -85,6 +96,9 @@ spec: - name: etcsysmd mountPath: /hostfs/etc/systemd readOnly: true + - name: etc-mid + mountPath: /etc/machine-id + readOnly: true volumes: - name: proc hostPath: @@ -98,21 +112,32 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd + # Mount /etc/machine-id from the host to determine host ID + # Needed for Elastic Security integration + - name: etc-mid + hostPath: + path: /etc/machine-id + type: File --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -170,6 +195,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -201,11 +227,12 @@ rules: - jobs - cronjobs verbs: [ "get", "list", "watch" ] - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -213,6 +240,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -222,7 +250,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml index 097d9786e03..c3c679efa36 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -14,31 +15,41 @@ 
spec: labels: app: elastic-agent spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent hostNetwork: true + # 'hostPID: true' enables the Elastic Security integration to observe all process exec events on the host. + # Sharing the host process ID namespace gives visibility of all processes running on the same host. + hostPID: true dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent image: docker.elastic.co/beats/elastic-agent:%VERSION% env: + # Set to 1 for enrollment into Fleet server. If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL value: "1" - # Set to true in case of insecure or unverified HTTP + # Set to true to communicate with Fleet with either insecure HTTP or unverified HTTPS - name: FLEET_INSECURE value: "true" - # The ip:port pair of fleet server + # Fleet Server URL to enroll the Elastic Agent into + # FLEET_URL can be found in Kibana, go to Management > Fleet > Settings - name: FLEET_URL value: "https://fleet-server:8220" - # If left empty KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed + # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) + # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN value: "" - name: KIBANA_HOST value: "http://kibana:5601" + # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_USERNAME value: "elastic" + # The basic authentication password used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_PASSWORD value: "changeme" - name: NODE_NAME @@ -85,6 +96,9 @@ spec: - name: etcsysmd mountPath: /hostfs/etc/systemd readOnly: true + - name: etc-mid + mountPath: /etc/machine-id + readOnly: true volumes: - name: proc hostPath: @@ -98,18 +112,29 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd + # Mount /etc/machine-id from the host to determine host ID + # Needed for Elastic Security integration + - name: etc-mid + hostPath: + path: /etc/machine-id + type: File diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml index 0ef5b850782..0d961215f4e 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml @@ -13,6 +13,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -44,11 +45,12 @@ rules: - jobs - cronjobs verbs: [ "get", "list", "watch" ] - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] 
resources: - clusterrolebindings @@ -56,6 +58,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -65,7 +68,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index ab360f19bcb..0984f0dc8ac 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -63,7 +64,9 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s - # If `https` is used to access `kube-state-metrics`, then to all `kubernetes.state_*` datasets should be added: + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt @@ -76,6 +79,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_daemonset type: metrics @@ -85,6 +94,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_deployment type: metrics @@ -94,6 +109,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_job type: metrics @@ -103,6 +124,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - 
/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_node type: metrics @@ -112,6 +139,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolume type: metrics @@ -121,6 +154,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolumeclaim type: metrics @@ -130,6 +169,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_pod type: metrics @@ -139,6 +184,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_replicaset type: metrics @@ -148,6 +199,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_resourcequota type: metrics @@ -157,6 +214,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_service type: metrics @@ -166,6 +229,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC 
authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_statefulset type: metrics @@ -175,6 +244,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_storageclass type: metrics @@ -184,6 +259,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - name: system-logs type: logfile use_output: default @@ -227,6 +308,7 @@ data: fields: ecs.version: 1.12.0 - name: container-log + id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} type: filestream use_output: default meta: @@ -252,6 +334,7 @@ data: paths: - /var/log/containers/*${kubernetes.container.id}.log - name: audit-log + id: audit-log type: filestream use_output: default meta: @@ -415,7 +498,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-controller-manager' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'kube-controller-manager' - data_stream: dataset: kubernetes.scheduler @@ -428,7 +511,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-scheduler' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'openshift-kube-scheduler' - data_stream: dataset: kubernetes.proxy @@ -437,7 +520,7 @@ data: - proxy hosts: - 'localhost:10249' - # Openshift: + # On Openshift port should be adjusted: # - 'localhost:29101' period: 10s - data_stream: @@ -557,6 +640,8 @@ spec: labels: app: elastic-agent-standalone spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule @@ -571,10 +656,14 @@ spec: "-e", ] env: + # The basic authentication username used to connect to Elasticsearch + # This user needs the privileges required to publish events to Elasticsearch. 
- name: ES_USERNAME value: "elastic" + # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD value: "" + # The Elasticsearch host to communicate with - name: ES_HOST value: "" - name: NODE_NAME @@ -642,18 +731,23 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd @@ -714,6 +808,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -745,11 +840,12 @@ rules: - nodes/stats verbs: - get - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -757,6 +853,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -766,7 +863,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent-standalone - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent-standalone diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 6894f32bbe4..7048bf22adb 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -63,7 +64,9 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s - # If `https` is used to access `kube-state-metrics`, then to all `kubernetes.state_*` datasets should be added: + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt @@ -76,6 +79,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_daemonset type: metrics @@ -85,6 +94,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_deployment type: metrics @@ -94,6 +109,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_job type: metrics @@ -103,6 +124,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_node type: metrics @@ -112,6 +139,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolume type: metrics @@ -121,6 +154,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolumeclaim type: metrics @@ -130,6 +169,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_pod type: metrics @@ -139,6 +184,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_replicaset type: metrics @@ -148,6 +199,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access 
`kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_resourcequota type: metrics @@ -157,6 +214,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_service type: metrics @@ -166,6 +229,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_statefulset type: metrics @@ -175,6 +244,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_storageclass type: metrics @@ -184,6 +259,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - name: system-logs type: logfile use_output: default @@ -227,6 +308,7 @@ data: fields: ecs.version: 1.12.0 - name: container-log + id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} type: filestream use_output: default meta: @@ -252,6 +334,7 @@ data: paths: - /var/log/containers/*${kubernetes.container.id}.log - name: audit-log + id: audit-log type: filestream use_output: default meta: @@ -415,7 +498,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-controller-manager' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'kube-controller-manager' - data_stream: dataset: kubernetes.scheduler @@ -428,7 +511,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-scheduler' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'openshift-kube-scheduler' - data_stream: dataset: kubernetes.proxy @@ -437,7 +520,7 @@ data: 
- proxy hosts: - 'localhost:10249' - # Openshift: + # On Openshift port should be adjusted: # - 'localhost:29101' period: 10s - data_stream: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 2a0f23107f1..0bf131ec8ea 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -14,6 +14,8 @@ spec: labels: app: elastic-agent-standalone spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule @@ -28,10 +30,14 @@ spec: "-e", ] env: + # The basic authentication username used to connect to Elasticsearch + # This user needs the privileges required to publish events to Elasticsearch. - name: ES_USERNAME value: "elastic" + # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD value: "" + # The Elasticsearch host to communicate with - name: ES_HOST value: "" - name: NODE_NAME @@ -99,18 +105,23 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml index b253f0520fe..8a644f3aadf 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml @@ -13,6 +13,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -44,11 +45,12 @@ rules: - nodes/stats verbs: - get - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -56,6 +58,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -65,7 +68,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent-standalone - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent-standalone diff --git a/dev-tools/packaging/files/ironbank/LICENSE b/dev-tools/packaging/files/ironbank/LICENSE new file mode 100644 index 00000000000..ef2739c152e --- /dev/null +++ b/dev-tools/packaging/files/ironbank/LICENSE @@ -0,0 +1,280 @@ +ELASTIC LICENSE AGREEMENT + +PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH +CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF +THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE") +THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW, +CERTAIN OF THE 
ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY +INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU +ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE +WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE +GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON +BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL +AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF +SUCH ENTITY. + +Posted Date: April 20, 2018 + +This Agreement is entered into by and between Elasticsearch BV ("Elastic") and +You, or the legal entity on behalf of whom You are acting (as applicable, +"You"). + +1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE +SOFTWARE + + 1.1 Object Code End User License. Subject to the terms and conditions of + Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and + for so long as you are not in breach of any provision of this Agreement, a + License to the Basic Features and Functions of the Elastic Software. + + 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic + and its licensors own all right, title and interest in and to the Elastic + Software, and except as expressly set forth in Sections 1.1, and 2.1 of this + Agreement, no other license to the Elastic Software is granted to You under + this Agreement, by implication, estoppel or otherwise. You agree not to: (i) + reverse engineer or decompile, decrypt, disassemble or otherwise reduce any + Elastic Software provided to You in Object Code, or any portion thereof, to + Source Code, except and only to the extent any such restriction is prohibited + by applicable law, (ii) except as expressly permitted in this Agreement, + prepare derivative works from, modify, copy or use the Elastic Software Object + Code or the Commercial Software Source Code in any manner; (iii) except as + expressly permitted in Section 1.1 above, transfer, sell, rent, lease, + distribute, sublicense, loan or otherwise transfer, Elastic Software Object + Code, in whole or in part, to any third party; (iv) use Elastic Software + Object Code for providing time-sharing services, any software-as-a-service, + service bureau services or as part of an application services provider or + other service offering (collectively, "SaaS Offering") where obtaining access + to the Elastic Software or the features and functions of the Elastic Software + is a primary reason or substantial motivation for users of the SaaS Offering + to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) + circumvent the limitations on use of Elastic Software provided to You in + Object Code format that are imposed or preserved by any License Key, or (vi) + alter or remove any Marks and Notices in the Elastic Software. If You have any + question as to whether a specific SaaS Offering constitutes a Prohibited SaaS + Offering, or are interested in obtaining Elastic's permission to engage in + commercial or non-commercial distribution of the Elastic Software, please + contact elastic_license@elastic.co. + + 1.3 Third Party Open Source Software. The Commercial Software may contain or + be provided with third party open source libraries, components, utilities and + other open source software (collectively, "Open Source Software"), which Open + Source Software may have applicable license terms as identified on a website + designated by Elastic. 
Notwithstanding anything to the contrary herein, use of + the Open Source Software shall be subject to the license terms and conditions + applicable to such Open Source Software, to the extent required by the + applicable licensor (which terms shall not restrict the license rights granted + to You hereunder, but may contain additional rights). To the extent any + condition of this Agreement conflicts with any license to the Open Source + Software, the Open Source Software license will govern with respect to such + Open Source Software only. Elastic may also separately provide you with + certain open source software that is licensed by Elastic. Your use of such + Elastic open source software will not be governed by this Agreement, but by + the applicable open source license terms. + +2. COMMERCIAL SOFTWARE SOURCE CODE + + 2.1 Limited License. Subject to the terms and conditions of Section 2.2 of + this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as + you are not in breach of any provision of this Agreement, a limited, + non-exclusive, non-transferable, fully paid up royalty free right and license + to the Commercial Software in Source Code format, without the right to grant + or authorize sublicenses, to prepare Derivative Works of the Commercial + Software, provided You (i) do not hack the licensing mechanism, or otherwise + circumvent the intended limitations on the use of Elastic Software to enable + features other than Basic Features and Functions or those features You are + entitled to as part of a Subscription, and (ii) use the resulting object code + only for reasonable testing purposes. + + 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the + Commercial Software Source Code other than in accordance with Section 2.1 + above, (ii) use a Derivative Work of the Commercial Software outside of a + Non-production Environment, in any production capacity, on a temporary or + permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, + loan or otherwise make available the Commercial Software Source Code, in whole + or in part, to any third party. Notwithstanding the foregoing, You may + maintain a copy of the repository in which the Source Code of the Commercial + Software resides and that copy may be publicly accessible, provided that you + include this Agreement with Your copy of the repository. + +3. TERMINATION + + 3.1 Termination. This Agreement will automatically terminate, whether or not + You receive notice of such Termination from Elastic, if You breach any of its + provisions. + + 3.2 Post Termination. Upon any termination of this Agreement, for any reason, + You shall promptly cease the use of the Elastic Software in Object Code format + and cease use of the Commercial Software in Source Code format. For the + avoidance of doubt, termination of this Agreement will not affect Your right + to use Elastic Software, in either Object Code or Source Code formats, made + available under the Apache License Version 2.0. + + 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or + expiration of this Agreement. + +4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + + 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE + LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, + AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR + STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. 
TO THE MAXIMUM EXTENT + PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY + DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH + RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS + OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE + ELASTIC SOFTWARE WILL BE UNINTERRUPTED. + + 4.2 Limitation of Liability. IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE + LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, + INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS + INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY + SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH + OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE + PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A + BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC + HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +5. MISCELLANEOUS + + This Agreement completely and exclusively states the entire agreement of the + parties regarding the subject matter herein, and it supersedes, and its terms + govern, all prior proposals, agreements, or other communications between the + parties, oral or written, regarding such subject matter. This Agreement may be + modified by Elastic from time to time, and any such modifications will be + effective upon the "Posted Date" set forth at the top of the modified + Agreement. If any provision hereof is held unenforceable, this Agreement will + continue without said provision and be interpreted to reflect the original + intent of the parties. This Agreement and any non-contractual obligation + arising out of or in connection with it, is governed exclusively by Dutch law. + This Agreement shall not be governed by the 1980 UN Convention on Contracts + for the International Sale of Goods. All disputes arising out of or in + connection with this Agreement, including its existence and validity, shall be + resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except + where mandatory law provides for the courts at another location in The + Netherlands to have jurisdiction. The parties hereby irrevocably waive any and + all claims and defenses either might otherwise have in any such action or + proceeding in any of such courts based upon any alleged lack of personal + jurisdiction, improper venue, forum non conveniens or any similar claim or + defense. A breach or threatened breach, by You of Section 2 may cause + irreparable harm for which damages at law may not provide adequate relief, and + therefore Elastic shall be entitled to seek injunctive relief without being + required to post a bond. You may not assign this Agreement (including by + operation of law in connection with a merger or acquisition), in whole or in + part to any third party without the prior written consent of Elastic, which + may be withheld or granted by Elastic in its sole and absolute discretion. + Any assignment in violation of the preceding sentence is void. Notices to + Elastic may also be sent to legal@elastic.co. + +6. 
DEFINITIONS + + The following terms have the meanings ascribed: + + 6.1 "Affiliate" means, with respect to a party, any entity that controls, is + controlled by, or which is under common control with, such party, where + "control" means ownership of at least fifty percent (50%) of the outstanding + voting shares of the entity, or the contractual right to establish policy for, + and manage the operations of, the entity. + + 6.2 "Basic Features and Functions" means those features and functions of the + Elastic Software that are eligible for use under a Basic license, as set forth + at https://www.elastic.co/subscriptions, as may be modified by Elastic from + time to time. + + 6.3 "Commercial Software" means the Elastic Software Source Code in any file + containing a header stating the contents are subject to the Elastic License or + which is contained in the repository folder labeled "x-pack", unless a LICENSE + file present in the directory subtree declares a different license. + + 6.4 "Derivative Work of the Commercial Software" means, for purposes of this + Agreement, any modification(s) or enhancement(s) to the Commercial Software, + which represent, as a whole, an original work of authorship. + + 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, + royalty free, right and license, without the right to grant or authorize + sublicenses, solely for Your internal business operations to (i) install and + use the applicable Features and Functions of the Elastic Software in Object + Code, and (ii) permit Contractors and Your Affiliates to use the Elastic + software as set forth in (i) above, provided that such use by Contractors must + be solely for Your benefit and/or the benefit of Your Affiliates, and You + shall be responsible for all acts and omissions of such Contractors and + Affiliates in connection with their use of the Elastic software that are + contrary to the terms and conditions of this Agreement. + + 6.6 "License Key" means a sequence of bytes, including but not limited to a + JSON blob, that is used to enable certain features and functions of the + Elastic Software. + + 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and + notices present on the Documentation as originally provided by Elastic. + + 6.8 "Non-production Environment" means an environment for development, testing + or quality assurance, where software is not used for production purposes. + + 6.9 "Object Code" means any form resulting from mechanical transformation or + translation of Source Code form, including but not limited to compiled object + code, generated documentation, and conversions to other media types. + + 6.10 "Source Code" means the preferred form of computer software for making + modifications, including but not limited to software source code, + documentation source, and configuration files. + + 6.11 "Subscription" means the right to receive Support Services and a License + to the Commercial Software. + + +GOVERNMENT END USER ADDENDUM TO THE ELASTIC LICENSE AGREEMENT + + This ADDENDUM TO THE ELASTIC LICENSE AGREEMENT (this "Addendum") applies +only to U.S. Federal Government, State Government, and Local Government +entities ("Government End Users") of the Elastic Software. This Addendum is +subject to, and hereby incorporated into, the Elastic License Agreement, +which is being entered into as of even date herewith, by Elastic and You (the +"Agreement"). 
This Addendum sets forth additional terms and conditions +related to Your use of the Elastic Software. Capitalized terms not defined in +this Addendum have the meaning set forth in the Agreement. + + 1. LIMITED LICENSE TO DISTRIBUTE (DSOP ONLY). Subject to the terms and +conditions of the Agreement (including this Addendum), Elastic grants the +Department of Defense Enterprise DevSecOps Initiative (DSOP) a royalty-free, +non-exclusive, non-transferable, limited license to reproduce and distribute +the Elastic Software solely through a software distribution repository +controlled and managed by DSOP, provided that DSOP: (i) distributes the +Elastic Software complete and unmodified, inclusive of the Agreement +(including this Addendum) and (ii) does not remove or alter any proprietary +legends or notices contained in the Elastic Software. + + 2. CHOICE OF LAW. The choice of law and venue provisions set forth shall +prevail over those set forth in Section 5 of the Agreement. + + "For U.S. Federal Government Entity End Users. This Agreement and any + non-contractual obligation arising out of or in connection with it, is + governed exclusively by U.S. Federal law. To the extent permitted by + federal law, the laws of the State of Delaware (excluding Delaware choice + of law rules) will apply in the absence of applicable federal law. + + For State and Local Government Entity End Users. This Agreement and any + non-contractual obligation arising out of or in connection with it, is + governed exclusively by the laws of the state in which you are located + without reference to conflict of laws. Furthermore, the Parties agree that + the Uniform Computer Information Transactions Act or any version thereof, + adopted by any state in any form ('UCITA'), shall not apply to this + Agreement and, to the extent that UCITA is applicable, the Parties agree to + opt out of the applicability of UCITA pursuant to the opt-out provision(s) + contained therein." + + 3. ELASTIC LICENSE MODIFICATION. Section 5 of the Agreement is hereby +amended to replace + + "This Agreement may be modified by Elastic from time to time, and any + such modifications will be effective upon the "Posted Date" set forth at + the top of the modified Agreement." + + with: + + "This Agreement may be modified by Elastic from time to time; provided, + however, that any such modifications shall apply only to Elastic Software + that is installed after the "Posted Date" set forth at the top of the + modified Agreement." + +V100820.0 diff --git a/dev-tools/packaging/files/ironbank/config/docker-entrypoint b/dev-tools/packaging/files/ironbank/config/docker-entrypoint new file mode 100644 index 00000000000..7ebe21745f4 --- /dev/null +++ b/dev-tools/packaging/files/ironbank/config/docker-entrypoint @@ -0,0 +1,11 @@ +#!/bin/bash + +set -eo pipefail + +# For information on the possible environment variables that can be passed into the container. Run the following +# command for information on the options that are available. +# +# `./elastic-agent container --help` +# + +elastic-agent container "$@" diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index 0c2e0da906e..c02c0596e0e 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -1,910 +1,902 @@ ---- - -# This file contains the package specifications for both Community Beats and -# Official Beats. The shared section contains YAML anchors that are used to -# define common parts of the package in order to not repeat ourselves. 
- -shared: - - &common - name: '{{.BeatName}}' - service_name: '{{.BeatServiceName}}' - os: '{{.GOOS}}' - arch: '{{.PackageArch}}' - vendor: '{{.BeatVendor}}' - version: '{{ beat_version }}' - license: '{{.BeatLicense}}' - url: '{{.BeatURL}}' - description: '{{.BeatDescription}}' - - # agent specific - # Deb/RPM spec for community beats. - - &deb_rpm_agent_spec - <<: *common - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /usr/share/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.reference.yml: - source: 'elastic-agent.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.yml: - source: 'elastic-agent.yml' - mode: 0600 - config: true - /etc/{{.BeatName}}/.elastic-agent.active.commit: - content: > - {{ commit }} - mode: 0644 - /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: - source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} - mode: 0755 - /usr/bin/{{.BeatName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl' - mode: 0755 - /lib/systemd/system/{{.BeatServiceName}}.service: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl' - mode: 0644 - /etc/init.d/{{.BeatServiceName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/elastic-agent.init.sh.tmpl' - mode: 0755 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: - source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' - mode: 0755 - config_mode: 0644 - skip_on_missing: true - - # MacOS pkg spec for community beats. - - &macos_agent_pkg_spec - <<: *common - extra_vars: - # OS X 10.11 El Capitan is the oldest supported by Go 1.14. 
- # https://golang.org/doc/go1.14#ports - min_supported_osx_version: 10.11 - identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' - install_path: /Library/Application Support - pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.elastic-agent.tmpl' - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.reference.yml: - source: 'elastic-agent.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.yml: - source: 'elastic-agent.yml' - mode: 0600 - config: true - /etc/{{.BeatName}}/.elastic-agent.active.commit: - content: > - {{ commit }} - mode: 0644 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: - source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' - mode: 0755 - config_mode: 0644 - skip_on_missing: true - - - &agent_binary_files - '{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - 'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - .build_hash.txt: - content: > - {{ commit }} - mode: 0644 - 'elastic-agent.reference.yml': - source: 'elastic-agent.reference.yml' - mode: 0644 - 'elastic-agent.yml': - source: 'elastic-agent.yml' - mode: 0600 - config: true - '.elastic-agent.active.commit': - content: > - {{ commit }} - mode: 0644 - - # Binary package spec (tar.gz for linux/darwin) for community beats. - - &agent_binary_spec - <<: *common - files: - <<: *agent_binary_files - 'data/{{.BeatName}}-{{ commit_short }}/components': - source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' - mode: 0755 - config_mode: 0644 - skip_on_missing: true - - - # Binary package spec (zip for windows) for community beats. 
- - &agent_windows_binary_spec - <<: *common - files: - <<: *agent_binary_files - 'data/{{.BeatName}}-{{ commit_short }}/components': - source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.zip/' - mode: 0755 - config_mode: 0644 - skip_on_missing: true - - - &agent_docker_spec - <<: *agent_binary_spec - extra_vars: - from: 'ubuntu:20.04' - buildFrom: 'ubuntu:20.04' - dockerfile: 'Dockerfile.elastic-agent.tmpl' - docker_entrypoint: 'docker-entrypoint.elastic-agent.tmpl' - user: '{{ .BeatName }}' - linux_capabilities: '' - image_name: '' - beats_install_path: "install" - files: - 'elastic-agent.yml': - source: 'elastic-agent.docker.yml' - mode: 0600 - config: true - '.elastic-agent.active.commit': - content: > - {{ commit }} - mode: 0644 - 'data/cloud_downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0755 - 'data/cloud_downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0755 - - - &agent_docker_arm_spec - <<: *agent_docker_spec - extra_vars: - from: 'arm64v8/ubuntu:20.04' - buildFrom: 'arm64v8/ubuntu:20.04' - - - &agent_docker_cloud_spec - <<: *agent_docker_spec - extra_vars: - image_name: '{{.BeatName}}-cloud' - repository: 'docker.elastic.co/beats-ci' - - - &agent_docker_complete_spec - <<: *agent_docker_spec - extra_vars: - image_name: '{{.BeatName}}-complete' - - # Deb/RPM spec for community beats. - - &deb_rpm_spec - <<: *common - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /etc/{{.BeatName}}/fields.yml: - source: fields.yml - mode: 0644 - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /usr/share/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: - source: '{{.BeatName}}.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.yml: - source: '{{.BeatName}}.yml' - mode: 0600 - config: true - /usr/share/{{.BeatName}}/kibana: - source: _meta/kibana.generated - mode: 0644 - /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: - source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} - mode: 0755 - /usr/bin/{{.BeatName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/beatname.sh.tmpl' - mode: 0755 - /lib/systemd/system/{{.BeatServiceName}}.service: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/systemd.unit.tmpl' - mode: 0644 - /etc/init.d/{{.BeatServiceName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/init.sh.tmpl' - mode: 0755 - - # MacOS pkg spec for community beats. 
- - &macos_beat_pkg_spec - <<: *common - extra_vars: - # OS X 10.8 Mountain Lion is the oldest supported by Go 1.10. - # https://golang.org/doc/go1.10#ports - min_supported_osx_version: 10.8 - identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' - install_path: /Library/Application Support - pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.tmpl' - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/kibana: - source: _meta/kibana.generated - mode: 0644 - /etc/{{.BeatName}}/fields.yml: - source: fields.yml - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: - source: '{{.BeatName}}.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.yml: - source: '{{.BeatName}}.yml' - mode: 0600 - config: true - - - &binary_files - '{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - fields.yml: - source: fields.yml - mode: 0644 - LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - .build_hash.txt: - content: > - {{ commit }} - mode: 0644 - '{{.BeatName}}.reference.yml': - source: '{{.BeatName}}.reference.yml' - mode: 0644 - '{{.BeatName}}.yml': - source: '{{.BeatName}}.yml' - mode: 0600 - config: true - kibana: - source: _meta/kibana.generated - mode: 0644 - - # Binary package spec (tar.gz for linux/darwin) for community beats. - - &binary_spec - <<: *common - files: - <<: *binary_files - - # Binary package spec (zip for windows) for community beats. 
- - &windows_binary_spec - <<: *common - files: - <<: *binary_files - install-service-{{.BeatName}}.ps1: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/install-service.ps1.tmpl' - mode: 0755 - uninstall-service-{{.BeatName}}.ps1: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/uninstall-service.ps1.tmpl' - mode: 0755 - - - &docker_spec - <<: *binary_spec - extra_vars: - from: 'ubuntu:20.04' - buildFrom: 'ubuntu:20.04' - user: '{{ .BeatName }}' - linux_capabilities: '' - files: - '{{.BeatName}}.yml': - source: '{{.BeatName}}.docker.yml' - mode: 0600 - config: true - - - &docker_arm_spec - <<: *docker_spec - extra_vars: - from: 'arm64v8/ubuntu:20.04' - buildFrom: 'arm64v8/ubuntu:20.04' - - - &docker_ubi_spec - extra_vars: - image_name: '{{.BeatName}}-ubi8' - from: 'docker.elastic.co/ubi8/ubi-minimal' - - - &docker_arm_ubi_spec - extra_vars: - image_name: '{{.BeatName}}-ubi8' - from: 'registry.access.redhat.com/ubi8/ubi-minimal:8.2' - - - &elastic_docker_spec - extra_vars: - repository: 'docker.elastic.co/beats' - - # - # License modifiers for Apache 2.0 - # - - &apache_license_for_binaries - license: "ASL 2.0" - files: - LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' - mode: 0644 - - - &apache_license_for_deb_rpm - license: "ASL 2.0" - files: - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' - mode: 0644 - - - &apache_license_for_macos_pkg - license: "ASL 2.0" - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' - mode: 0644 - - # - # License modifiers for the Elastic License - # - - &elastic_license_for_binaries - license: "Elastic License" - files: - LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' - mode: 0644 - - - &elastic_license_for_deb_rpm - license: "Elastic License" - files: - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' - mode: 0644 - - - &elastic_license_for_macos_pkg - license: "Elastic License" - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' - mode: 0644 - -# specs is a list of named packaging "flavors". -specs: - # Community Beats - community_beat: - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - - - os: linux - types: [docker] - spec: - <<: *docker_spec - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - - # Elastic Beat with Apache License (OSS) and binary taken the current - # directory. 
- elastic_beat_oss: - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - <<: *apache_license_for_deb_rpm - name: '{{.BeatName}}-oss' - - - os: linux - types: [docker] - spec: - <<: *docker_spec - <<: *elastic_docker_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - # Elastic Beat with Elastic License and binary taken the current directory. - elastic_beat_xpack: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *elastic_license_for_binaries - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - <<: *elastic_license_for_deb_rpm - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - # Elastic Beat with Elastic License and binary taken the current directory. - elastic_beat_xpack_reduced: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *elastic_license_for_binaries - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
- elastic_beat_xpack_separate_binaries: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - <<: *elastic_license_for_deb_rpm - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
- elastic_beat_agent_binaries: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *agent_windows_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: darwin - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_agent_spec - <<: *elastic_license_for_deb_rpm - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *agent_docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Complete image gets a 'complete' variant for synthetics and other large - # packages too big to fit in the main image - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *agent_docker_spec - <<: *agent_docker_complete_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Cloud specific docker image - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *elastic_docker_spec - <<: *agent_docker_spec - <<: *agent_docker_cloud_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *agent_docker_spec - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *agent_docker_arm_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Complete image gets a 'complete' variant for synthetics and other large - # packages too big to fit in the main image - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *agent_docker_arm_spec - <<: *agent_docker_complete_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Cloud specific docker image - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *elastic_docker_spec - <<: *agent_docker_arm_spec - <<: *agent_docker_cloud_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux 
- arch: arm64 - types: [docker] - spec: - <<: *agent_docker_arm_spec - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - # Elastic Beat with Elastic License and binary taken from the x-pack dir. - elastic_beat_agent_demo_binaries: - ### - # Elastic Licensed Packages - ### - - - os: linux - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - types: [docker] - spec: - <<: *agent_docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} +--- + +# This file contains the package specifications for both Community Beats and +# Official Beats. The shared section contains YAML anchors that are used to +# define common parts of the package in order to not repeat ourselves. + +shared: + - &common + name: '{{.BeatName}}' + service_name: '{{.BeatServiceName}}' + os: '{{.GOOS}}' + arch: '{{.PackageArch}}' + vendor: '{{.BeatVendor}}' + version: '{{ beat_version }}' + license: '{{.BeatLicense}}' + url: '{{.BeatURL}}' + description: '{{.BeatDescription}}' + + # agent specific + # Deb/RPM spec for community beats. 
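The header of the rewritten packages.yml above leans on YAML anchors and merge keys (`<<: *common`) so each packaging flavor only spells out what differs from the shared base. As a rough illustration of how those merges collapse at decode time, here is a hedged Go sketch; the miniature spec below is invented for the example, and it only assumes that gopkg.in/yaml.v3 (already listed in go.mod later in this diff) resolves merge keys when unmarshalling:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// A made-up, minimal slice of a packaging spec: `shared` anchors a common
// mapping and a flavor pulls it in with a merge key, then overrides one field.
const miniSpec = `
shared:
  - &common
    name: elastic-agent
    vendor: Elastic
    license: Default License

specs:
  demo:
    - os: linux
      types: [tgz]
      spec:
        <<: *common
        license: Elastic License
`

func main() {
	var doc map[string]interface{}
	if err := yaml.Unmarshal([]byte(miniSpec), &doc); err != nil {
		panic(err)
	}
	// Assuming merge keys are resolved on decode, the demo spec ends up with
	// name and vendor from &common plus its own license override.
	fmt.Printf("%v\n", doc["specs"])
}
```

The real file uses the same mechanism at a much larger scale, layering several merge keys (base spec, docker variant, license modifier) onto a single flavor entry.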
+ - &deb_rpm_agent_spec + <<: *common + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/postinstall.sh.tmpl' + files: + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /usr/share/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.reference.yml: + source: 'elastic-agent.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.yml: + source: 'elastic-agent.yml' + mode: 0600 + config: true + /etc/{{.BeatName}}/.elastic-agent.active.commit: + content: > + {{ commit }} + mode: 0644 + /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: + source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} + mode: 0755 + /usr/bin/{{.BeatName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl' + mode: 0755 + /lib/systemd/system/{{.BeatServiceName}}.service: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl' + mode: 0644 + /etc/init.d/{{.BeatServiceName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/elastic-agent.init.sh.tmpl' + mode: 0755 + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + # MacOS pkg spec for community beats. + - &macos_agent_pkg_spec + <<: *common + extra_vars: + # OS X 10.11 El Capitan is the oldest supported by Go 1.14. 
+ # https://golang.org/doc/go1.14#ports + min_supported_osx_version: 10.11 + identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' + install_path: /Library/Application Support + pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.elastic-agent.tmpl' + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.reference.yml: + source: 'elastic-agent.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.yml: + source: 'elastic-agent.yml' + mode: 0600 + config: true + /etc/{{.BeatName}}/.elastic-agent.active.commit: + content: > + {{ commit }} + mode: 0644 + /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + - &agent_binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + 'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + .build_hash.txt: + content: > + {{ commit }} + mode: 0644 + 'elastic-agent.reference.yml': + source: 'elastic-agent.reference.yml' + mode: 0644 + 'elastic-agent.yml': + source: 'elastic-agent.yml' + mode: 0600 + config: true + '.elastic-agent.active.commit': + content: > + {{ commit }} + mode: 0644 + + # Binary package spec (tar.gz for linux/darwin) for community beats. + - &agent_binary_spec + <<: *common + files: + <<: *agent_binary_files + 'data/{{.BeatName}}-{{ commit_short }}/components': + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + + # Binary package spec (zip for windows) for community beats. 
+ - &agent_windows_binary_spec + <<: *common + files: + <<: *agent_binary_files + 'data/{{.BeatName}}-{{ commit_short }}/components': + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.zip/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + - &agent_docker_spec + <<: *agent_binary_spec + extra_vars: + from: 'ubuntu:20.04' + buildFrom: 'ubuntu:20.04' + dockerfile: 'Dockerfile.elastic-agent.tmpl' + docker_entrypoint: 'docker-entrypoint.elastic-agent.tmpl' + user: '{{ .BeatName }}' + linux_capabilities: '' + image_name: '' + beats_install_path: "install" + files: + 'elastic-agent.yml': + source: 'elastic-agent.docker.yml' + mode: 0600 + config: true + '.elastic-agent.active.commit': + content: > + {{ commit }} + mode: 0644 + 'data/cloud_downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0755 + 'data/cloud_downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0755 + + - &agent_docker_arm_spec + <<: *agent_docker_spec + extra_vars: + from: 'arm64v8/ubuntu:20.04' + buildFrom: 'arm64v8/ubuntu:20.04' + + - &agent_docker_cloud_spec + <<: *agent_docker_spec + extra_vars: + image_name: '{{.BeatName}}-cloud' + repository: 'docker.elastic.co/beats-ci' + + - &agent_docker_complete_spec + <<: *agent_docker_spec + extra_vars: + image_name: '{{.BeatName}}-complete' + + # Deb/RPM spec for community beats. + - &deb_rpm_spec + <<: *common + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' + files: + /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /etc/{{.BeatName}}/fields.yml: + source: fields.yml + mode: 0644 + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /usr/share/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: + source: '{{.BeatName}}.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.yml: + source: '{{.BeatName}}.yml' + mode: 0600 + config: true + /usr/share/{{.BeatName}}/kibana: + source: _meta/kibana.generated + mode: 0644 + /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: + source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} + mode: 0755 + /usr/bin/{{.BeatName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/beatname.sh.tmpl' + mode: 0755 + /lib/systemd/system/{{.BeatServiceName}}.service: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/systemd.unit.tmpl' + mode: 0644 + /etc/init.d/{{.BeatServiceName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/init.sh.tmpl' + mode: 0755 + + # MacOS pkg spec for community beats. 
+ - &macos_beat_pkg_spec + <<: *common + extra_vars: + # OS X 10.8 Mountain Lion is the oldest supported by Go 1.10. + # https://golang.org/doc/go1.10#ports + min_supported_osx_version: 10.8 + identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' + install_path: /Library/Application Support + pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.tmpl' + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/kibana: + source: _meta/kibana.generated + mode: 0644 + /etc/{{.BeatName}}/fields.yml: + source: fields.yml + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: + source: '{{.BeatName}}.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.yml: + source: '{{.BeatName}}.yml' + mode: 0600 + config: true + + - &binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + fields.yml: + source: fields.yml + mode: 0644 + LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + .build_hash.txt: + content: > + {{ commit }} + mode: 0644 + '{{.BeatName}}.reference.yml': + source: '{{.BeatName}}.reference.yml' + mode: 0644 + '{{.BeatName}}.yml': + source: '{{.BeatName}}.yml' + mode: 0600 + config: true + kibana: + source: _meta/kibana.generated + mode: 0644 + + # Binary package spec (tar.gz for linux/darwin) for community beats. + - &binary_spec + <<: *common + files: + <<: *binary_files + + # Binary package spec (zip for windows) for community beats. 
+ - &windows_binary_spec + <<: *common + files: + <<: *binary_files + install-service-{{.BeatName}}.ps1: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/install-service.ps1.tmpl' + mode: 0755 + uninstall-service-{{.BeatName}}.ps1: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/uninstall-service.ps1.tmpl' + mode: 0755 + + - &docker_spec + <<: *binary_spec + extra_vars: + from: 'ubuntu:20.04' + buildFrom: 'ubuntu:20.04' + user: '{{ .BeatName }}' + linux_capabilities: '' + files: + '{{.BeatName}}.yml': + source: '{{.BeatName}}.docker.yml' + mode: 0600 + config: true + + - &docker_arm_spec + <<: *docker_spec + extra_vars: + from: 'arm64v8/ubuntu:20.04' + buildFrom: 'arm64v8/ubuntu:20.04' + + - &docker_ubi_spec + extra_vars: + image_name: '{{.BeatName}}-ubi8' + from: 'docker.elastic.co/ubi8/ubi-minimal' + + - &docker_arm_ubi_spec + extra_vars: + image_name: '{{.BeatName}}-ubi8' + from: 'registry.access.redhat.com/ubi8/ubi-minimal:8.2' + + - &elastic_docker_spec + extra_vars: + repository: 'docker.elastic.co/beats' + + # + # License modifiers for Apache 2.0 + # + - &apache_license_for_binaries + license: "ASL 2.0" + files: + LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' + mode: 0644 + + - &apache_license_for_deb_rpm + license: "ASL 2.0" + files: + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' + mode: 0644 + + - &apache_license_for_macos_pkg + license: "ASL 2.0" + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' + mode: 0644 + + # + # License modifiers for the Elastic License + # + - &elastic_license_for_binaries + license: "Elastic License" + files: + LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' + mode: 0644 + + - &elastic_license_for_deb_rpm + license: "Elastic License" + files: + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' + mode: 0644 + + - &elastic_license_for_macos_pkg + license: "Elastic License" + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' + mode: 0644 + +# specs is a list of named packaging "flavors". +specs: + # Community Beats + community_beat: + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + + - os: linux + types: [docker] + spec: + <<: *docker_spec + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + + # Elastic Beat with Apache License (OSS) and binary taken the current + # directory. 
+ elastic_beat_oss: + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + <<: *apache_license_for_deb_rpm + name: '{{.BeatName}}-oss' + + - os: linux + types: [docker] + spec: + <<: *docker_spec + <<: *elastic_docker_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + # Elastic Beat with Elastic License and binary taken the current directory. + elastic_beat_xpack: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *elastic_license_for_binaries + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + <<: *elastic_license_for_deb_rpm + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *docker_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *docker_arm_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + # Elastic Beat with Elastic License and binary taken the current directory. + elastic_beat_xpack_reduced: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *elastic_license_for_binaries + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
+ elastic_beat_xpack_separate_binaries: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + <<: *elastic_license_for_deb_rpm + files: + /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *docker_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *docker_arm_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
+ elastic_beat_agent_binaries: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *agent_windows_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: darwin + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 + + - os: linux + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_agent_spec + <<: *elastic_license_for_deb_rpm + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *agent_docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Complete image gets a 'complete' variant for synthetics and other large + # packages too big to fit in the main image + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *agent_docker_spec + <<: *agent_docker_complete_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Cloud specific docker image + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *elastic_docker_spec + <<: *agent_docker_spec + <<: *agent_docker_cloud_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *agent_docker_spec + <<: *docker_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *agent_docker_arm_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Complete image gets a 'complete' variant for synthetics and other large + # packages too big to fit in the main image + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *agent_docker_arm_spec + <<: *agent_docker_complete_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Cloud specific docker image + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *elastic_docker_spec + <<: *agent_docker_arm_spec + <<: *agent_docker_cloud_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *agent_docker_arm_spec + <<: *docker_arm_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + 
'{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: aix + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 + + + # Elastic Beat with Elastic License and binary taken from the x-pack dir. + elastic_beat_agent_demo_binaries: + ### + # Elastic Licensed Packages + ### + + - os: linux + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + types: [docker] + spec: + <<: *agent_docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: aix + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} diff --git a/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl new file mode 100644 index 00000000000..04c4dfde930 --- /dev/null +++ b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl @@ -0,0 +1,90 @@ +################################################################################ +# Build stage 0 +# Extract Elastic Agent and make various file manipulations. +################################################################################ +ARG BASE_REGISTRY=registry1.dsop.io +ARG BASE_IMAGE=ironbank/redhat/ubi/ubi8 +ARG BASE_TAG=8.6 + +FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} as prep_files + +ARG ELASTIC_STACK={{ beat_version }} +ARG ELASTIC_PRODUCT=elastic-agent +ARG OS_AND_ARCH=linux-x86_64 + +RUN mkdir /usr/share/${ELASTIC_PRODUCT} +WORKDIR /usr/share/${ELASTIC_PRODUCT} +COPY --chown=1000:0 ${ELASTIC_PRODUCT}-${ELASTIC_STACK}-${OS_AND_ARCH}.tar.gz . +RUN tar --strip-components=1 -zxf ${ELASTIC_PRODUCT}-${ELASTIC_STACK}-${OS_AND_ARCH}.tar.gz \ + && rm ${ELASTIC_PRODUCT}-${ELASTIC_STACK}-${OS_AND_ARCH}.tar.gz + +# Support arbitrary user ids +# Ensure that group permissions are the same as user permissions. +# This will help when relying on GID-0 to run Kibana, rather than UID-1000. +# OpenShift does this, for example. +# REF: https://docs.okd.io/latest/openshift_images/create-images.html +RUN chmod -R g=u /usr/share/${ELASTIC_PRODUCT} + +# Create auxiliary folders and assigning default permissions. +RUN mkdir -p /usr/share/${ELASTIC_PRODUCT}/data /usr/share/${ELASTIC_PRODUCT}/logs && \ + chown -R root:root /usr/share/${ELASTIC_PRODUCT} && \ + find /usr/share/${ELASTIC_PRODUCT} -type d -exec chmod 0750 {} \; && \ + find /usr/share/${ELASTIC_PRODUCT} -type f -exec chmod 0640 {} \; && \ + chmod 0750 /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT} && \ + chmod 0770 /usr/share/${ELASTIC_PRODUCT}/data /usr/share/${ELASTIC_PRODUCT}/logs + +################################################################################ +# Build stage 1 +# Copy prepared files from the previous stage and complete the image. 
+################################################################################ +FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} + +ARG ELASTIC_PRODUCT=elastic-agent + +COPY LICENSE /licenses/elastic-${ELASTIC_PRODUCT} + +# Add a dumb init process +COPY tinit /tinit +RUN chmod +x /tinit + +# Bring in product from the initial stage. +COPY --from=prep_files --chown=1000:0 /usr/share/${ELASTIC_PRODUCT} /usr/share/${ELASTIC_PRODUCT} +WORKDIR /usr/share/${ELASTIC_PRODUCT} +RUN ln -s /usr/share/${ELASTIC_PRODUCT} /opt/${ELASTIC_PRODUCT} + +ENV ELASTIC_CONTAINER="true" +RUN ln -s /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT} /usr/bin/${ELASTIC_PRODUCT} + +# Support arbitrary user ids +# Ensure gid 0 write permissions for OpenShift. +RUN chmod -R g+w /usr/share/${ELASTIC_PRODUCT} + +# config file ("${ELASTIC_PRODUCT}.yml") can only be writable by the root and group root +# it is needed on some configurations where the container needs to run as root +RUN chown root:root /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT}.yml \ + && chmod go-w /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT}.yml + +# Remove the suid bit everywhere to mitigate "Stack Clash" +RUN find / -xdev -perm -4000 -exec chmod u-s {} + + +# Provide a non-root user to run the process. +RUN groupadd --gid 1000 ${ELASTIC_PRODUCT} && useradd --uid 1000 --gid 1000 --groups 0 --home-dir /usr/share/${ELASTIC_PRODUCT} --no-create-home ${ELASTIC_PRODUCT} + +# Elastic Agent permissions +RUN find /usr/share//elastic-agent/data -type d -exec chmod 0770 {} \; && \ + find /usr/share//elastic-agent/data -type f -exec chmod 0660 {} \; && \ + chmod +x /usr/share//elastic-agent/data/elastic-agent-*/elastic-agent + +COPY jq /usr/local/bin +RUN chown root:root /usr/local/bin/jq && chmod 0755 /usr/local/bin/jq + +COPY config/docker-entrypoint /usr/local/bin/docker-entrypoint +RUN chmod 755 /usr/local/bin/docker-entrypoint + +USER ${ELASTIC_PRODUCT} +ENV ELASTIC_PRODUCT=${ELASTIC_PRODUCT} + +ENTRYPOINT ["/tinit", "--", "/usr/local/bin/docker-entrypoint"] +CMD [""] + +HEALTHCHECK --interval=10s --timeout=5s --start-period=1m --retries=5 CMD test -w '/tmp/elastic-agent/elastic-agent.sock' diff --git a/dev-tools/packaging/templates/ironbank/README.md.tmpl b/dev-tools/packaging/templates/ironbank/README.md.tmpl new file mode 100644 index 00000000000..271fdb8c0d7 --- /dev/null +++ b/dev-tools/packaging/templates/ironbank/README.md.tmpl @@ -0,0 +1,43 @@ +# elastic-agent + +**elastic-agent** is a single, unified way to add monitoring for logs, metrics, and other types of data to each host. A single agent makes it easier and faster to deploy monitoring across your infrastructure. The agent’s single, unified configuration makes it easier to add integrations for new data sources. + +For more information about elastic-agent, please visit +https://www.elastic.co/guide/en/ingest-management/7.17/index.html. + +--- + +**NOTE** + +This functionality is in beta and is subject to change. The design and code is less mature than official GA features and is being provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features. + +--- + +### Installation instructions + +Please follow the documentation on [Quick start](https://www.elastic.co/guide/en/fleet/{{ .MajorMinor }}/fleet-elastic-agent-quick-start.html). 
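The Ironbank image defined above declares liveness purely in terms of the agent's control socket: the HEALTHCHECK passes as long as `/tmp/elastic-agent/elastic-agent.sock` is writable. A hedged Go equivalent of that probe is sketched below; the socket path is taken from the Dockerfile, everything else is illustrative, and it relies only on golang.org/x/sys, which already appears in this module's dependencies:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Same path the Dockerfile HEALTHCHECK tests with `test -w`; adjust if the
	// control socket lives elsewhere in your deployment.
	const sock = "/tmp/elastic-agent/elastic-agent.sock"

	// unix.Access with W_OK mirrors the shell's -w test: it asks the kernel
	// whether the calling user may write to the path.
	if err := unix.Access(sock, unix.W_OK); err != nil {
		fmt.Fprintf(os.Stderr, "control socket not writable: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("control socket is writable")
}
```

Note this sketch only builds on Unix-like targets, which matches the Linux-only container image it describes.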
+
+### Where to file issues and PRs
+
+- [Issues](https://github.com/elastic/elastic-agent/issues)
+- [PRs](https://github.com/elastic/elastic-agent/pulls)
+
+### DoD Restrictions
+
+### Where to get help
+
+- [elastic-agent Discuss Forums](https://discuss.elastic.co/tags/c/elastic-stack/beats/28/elastic-agent)
+- [elastic-agent Documentation](https://www.elastic.co/guide/en/ingest-management/current/index.html)
+
+### Still need help?
+
+You can learn more about the Elastic Community and how to get more help by
+visiting [Elastic Community](https://www.elastic.co/community).
+
+This software is governed by the [Elastic
+License](https://github.com/elastic/beats/blob/{{ .MajorMinor }}/licenses/ELASTIC-LICENSE.txt),
+and includes the full set of [free
+features](https://www.elastic.co/subscriptions).
+
+View the detailed release notes
+[here](https://www.elastic.co/guide/en/beats/libbeat/current/release-notes-{{ beat_version }}.html).
diff --git a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl
new file mode 100644
index 00000000000..3c753caa0fb
--- /dev/null
+++ b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl
@@ -0,0 +1,68 @@
+---
+apiVersion: v1
+
+# The repository name in registry1, excluding /ironbank/
+name: "elastic/beats/elastic-agent"
+
+# List of tags to push for the repository in registry1
+# The most specific version should be the first tag and will be shown
+# on ironbank.dsop.io
+tags:
+- "{{ beat_version }}"
+- "latest"
+
+# Build args passed to Dockerfile ARGs
+args:
+  BASE_IMAGE: "redhat/ubi/ubi8"
+  BASE_TAG: "8.6"
+  ELASTIC_STACK: "{{ beat_version }}"
+  ELASTIC_PRODUCT: "elastic-agent"
+
+# Docker image labels
+labels:
+  org.opencontainers.image.title: "elastic-agent"
+  ## Human-readable description of the software packaged in the image
+  org.opencontainers.image.description: "elastic-agent is a single, unified way to add monitoring for logs, metrics, and other types of data to each host"
+  ## License(s) under which contained software is distributed
+  org.opencontainers.image.licenses: "Elastic License"
+  ## URL to find more information on the image
+  org.opencontainers.image.url: "https://www.elastic.co/products/beats/elastic-agent"
+  ## Name of the distributing entity, organization or individual
+  org.opencontainers.image.vendor: "Elastic"
+  org.opencontainers.image.version: "{{ beat_version }}"
+  ## Keywords to help with search (ex. "cicd,gitops,golang")
+  mil.dso.ironbank.image.keywords: "log,metrics,monitoring,observability,o11y,oblt,beats,elastic,elasticsearch,golang"
+  ## This value can be "opensource" or "commercial"
+  mil.dso.ironbank.image.type: "commercial"
+  ## Product the image belongs to for grouping multiple images
+  mil.dso.ironbank.product.name: "beats"
+
+# List of resources to make available to the offline build context
+resources:
+  - filename: "elastic-agent-{{ beat_version }}-linux-x86_64.tar.gz"
+    url: "/elastic-agent-{{ beat_version }}-linux-x86_64.tar.gz"
+    validation:
+      type: "sha512"
+      value: ""
+  - filename: tinit
+    url: https://github.com/krallin/tini/releases/download/v0.19.0/tini-amd64
+    validation:
+      type: sha256
+      value: 93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c
+  - filename: jq
+    url: https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
+    validation:
+      type: sha256
+      value: af986793a515d500ab2d35f8d2aecd656e764504b789b66d7e1a0b727a124c44
+
+# List of project maintainers
+maintainers:
+  - email: "nassim.kammah@elastic.co"
+    name: "Nassim Kammah"
+    username: "nassim.kammah"
+  - email: "ivan.fernandez@elastic.co"
+    name: "Ivan Fernandez Calvo"
+    username: "ivan.fernandez"
+  - email: "victor.martinez@elastic.co"
+    name: "Victor Martinez"
+    username: "victor.martinez"
diff --git a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl
new file mode 100644
index 00000000000..083ebb91060
--- /dev/null
+++ b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+set -e
+
+symlink="/usr/share/elastic-agent/bin/elastic-agent"
+old_agent_dir="$( dirname "$(readlink -f -- "$symlink")" )"
+
+commit_hash="{{ commit_short }}"
+
+yml_path="$old_agent_dir/state.yml"
+enc_path="$old_agent_dir/state.enc"
+
+new_agent_dir="$( dirname "$old_agent_dir")/elastic-agent-$commit_hash"
+
+if ! [[ "$old_agent_dir" -ef "$new_agent_dir" ]]; then
+    echo "migrate state from $old_agent_dir to $new_agent_dir"
+
+    if test -f "$yml_path"; then
+        echo "found "$yml_path", copy to "$new_agent_dir"."
+        cp "$yml_path" "$new_agent_dir"
+    fi
+
+    if test -f "$enc_path"; then
+        echo "found "$enc_path", copy to "$new_agent_dir"."
+ cp "$enc_path" "$new_agent_dir" + fi + + if test -f "$symlink"; then + echo "found symlink $symlink, unlink" + unlink "$symlink" + fi + + echo "create symlink "$symlink" to "$new_agent_dir/elastic-agent"" + ln -s "$new_agent_dir/elastic-agent" "$symlink" +fi + +systemctl daemon-reload 2> /dev/null +exit 0 diff --git a/go.mod b/go.mod index 7751b77f60c..267e46602f2 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 - github.com/elastic/elastic-agent-libs v0.2.3 + github.com/elastic/elastic-agent-libs v0.2.6 github.com/elastic/go-licenser v0.4.0 github.com/elastic/go-sysinfo v1.7.1 github.com/elastic/go-ucfg v0.8.5 @@ -61,7 +61,7 @@ require ( github.com/armon/go-radix v1.0.0 // indirect github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect - github.com/containerd/containerd v1.5.10 // indirect + github.com/containerd/containerd v1.5.13 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dnephin/pflag v1.0.7 // indirect @@ -115,6 +115,7 @@ require ( go.elastic.co/apm/v2 v2.0.0 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.uber.org/atomic v1.9.0 // indirect + go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/mod v0.5.1 // indirect golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect @@ -127,7 +128,7 @@ require ( google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 // indirect google.golang.org/grpc/examples v0.0.0-20220304170021-431ea809a767 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v1.0.0 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect diff --git a/go.sum b/go.sum index 8ccda348f78..67d8b0f1cc1 100644 --- a/go.sum +++ b/go.sum @@ -92,7 +92,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -229,6 +229,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod 
h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -250,8 +251,8 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4= -github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE= +github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -385,8 +386,8 @@ github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 h1:8sGoTlgXRCesR1+FjBv8YY5CyVhNSDjXlo4uq5q1RGM= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM= -github.com/elastic/elastic-agent-libs v0.2.3 h1:GY8M0fxOs/GBY2nIB+JOB91aoD72S87iEcm2qVGFUqI= -github.com/elastic/elastic-agent-libs v0.2.3/go.mod h1:1xDLBhIqBIjhJ7lr2s+xRFFkQHpitSp8q2zzv1Dqg+s= +github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= +github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY= github.com/elastic/go-elasticsearch/v7 v7.16.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= @@ -1251,8 +1252,9 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -1553,6 +1555,7 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1876,8 +1879,9 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/gotestsum v1.7.0 h1:RwpqwwFKBAa2h+F6pMEGpE707Edld0etUD3GhqqhDNc= diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go index a3f4ff0b3ea..0913b484712 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go @@ -159,7 +159,7 @@ func (h *PolicyChange) handleFleetServerHosts(ctx context.Context, c *config.Con errors.TypeNetwork, errors.M("hosts", h.config.Fleet.Client.Hosts)) } // discard body for proper cancellation and connection reuse - io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() reader, err := fleetToReader(h.agentInfo, h.config) diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index d5bc6a182d8..bf0d0fd6444 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -70,7 +70,7 @@ func New( return nil, fmt.Errorf("failed to load configuration: %w", err) } - upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig) + upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig, agentInfo) runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) if err != nil { @@ -121,7 +121,7 @@ func New( return nil, errors.New(err, "failed to initialize composable controller") } - coord 
:= coordinator.New(log, specs, reexec, upgrader, runtime, configMgr, composable, caps, compModifiers...) + coord := coordinator.New(log, agentInfo, specs, reexec, upgrader, runtime, configMgr, composable, caps, compModifiers...) if managed != nil { // the coordinator requires the config manager as well as in managed-mode the config manager requires the // coordinator, so it must be set here once the coordinator is created diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index dac48400179..906b9af2d64 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -42,6 +42,9 @@ type UpgradeManager interface { // Upgradeable returns true if can be upgraded. Upgradeable() bool + // Reload reloads the configuration for the upgrade manager. + Reload(rawConfig *config.Config) error + // Upgrade upgrades running agent. Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) } @@ -129,7 +132,8 @@ type StateFetcher interface { // // All configuration changes, update variables, and upgrade actions are managed and controlled by the coordinator. type Coordinator struct { - logger *logger.Logger + logger *logger.Logger + agentInfo *info.AgentInfo specs component.RuntimeSpecs @@ -150,9 +154,10 @@ type Coordinator struct { } // New creates a new coordinator. -func New(logger *logger.Logger, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator { +func New(logger *logger.Logger, agentInfo *info.AgentInfo, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator { return &Coordinator{ logger: logger, + agentInfo: agentInfo, specs: specs, reexecMgr: reexecMgr, upgradeMgr: upgradeMgr, @@ -429,6 +434,10 @@ func (c *Coordinator) processConfig(ctx context.Context, cfg *config.Config) (er } } + if err := c.upgradeMgr.Reload(cfg); err != nil { + return fmt.Errorf("failed to reload upgrade manager configuration: %w", err) + } + c.state.config = cfg c.state.ast = rawAst @@ -505,10 +514,9 @@ type coordinatorState struct { message string overrideState *coordinatorOverrideState - config *config.Config - ast *transpiler.AST - vars []*transpiler.Vars - components []component.Component + config *config.Config + ast *transpiler.AST + vars []*transpiler.Vars } type coordinatorOverrideState struct { diff --git a/internal/pkg/agent/application/coordinator/handler.go b/internal/pkg/agent/application/coordinator/handler.go new file mode 100644 index 00000000000..22130d1a776 --- /dev/null +++ b/internal/pkg/agent/application/coordinator/handler.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package coordinator + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" +) + +// LivenessResponse is the response body for the liveness endpoint. 
+type LivenessResponse struct { + ID string `json:"id"` + Status string `json:"status"` + Message string `json:"message"` + UpdateTime time.Time `json:"update_timestamp"` +} + +// ServeHTTP is an HTTP Handler for the coordinatorr. +// Response code is 200 for a healthy agent, and 503 otherwise. +// Response body is a JSON object that contains the agent ID, status, message, and the last status update time. +func (c *Coordinator) ServeHTTP(wr http.ResponseWriter, req *http.Request) { + s := c.State() + lr := LivenessResponse{ + ID: c.agentInfo.AgentID(), + Status: s.State.String(), + Message: s.Message, + + // TODO(blakerouse): Coordinator should be changed to store the last timestamp that the state has changed. + UpdateTime: time.Now().UTC(), + } + status := http.StatusOK + if s.State != client.Healthy { + status = http.StatusServiceUnavailable + } + + wr.Header().Set("Content-Type", "application/json") + wr.WriteHeader(status) + enc := json.NewEncoder(wr) + if err := enc.Encode(lr); err != nil { + c.logger.Errorf("Unable to encode liveness response: %v", err) + } +} diff --git a/internal/pkg/agent/application/info/agent_id.go b/internal/pkg/agent/application/info/agent_id.go index e0a6c64acbe..8056fd0cce1 100644 --- a/internal/pkg/agent/application/info/agent_id.go +++ b/internal/pkg/agent/application/info/agent_id.go @@ -71,7 +71,7 @@ func getInfoFromStore(s ioStore, logLevel string) (*persistentAgentInfo, error) agentConfigFile := paths.AgentConfigFile() reader, err := s.Load() if err != nil { - return nil, err + return nil, fmt.Errorf("failed to load from ioStore: %w", err) } // reader is closed by this function @@ -203,20 +203,20 @@ func loadAgentInfo(forceUpdate bool, logLevel string, createAgentID bool) (*pers agentConfigFile := paths.AgentConfigFile() diskStore := storage.NewEncryptedDiskStore(agentConfigFile) - agentinfo, err := getInfoFromStore(diskStore, logLevel) + agentInfo, err := getInfoFromStore(diskStore, logLevel) if err != nil { - return nil, err + return nil, fmt.Errorf("could not get agent info from store: %w", err) } - if agentinfo != nil && !forceUpdate && (agentinfo.ID != "" || !createAgentID) { - return agentinfo, nil + if agentInfo != nil && !forceUpdate && (agentInfo.ID != "" || !createAgentID) { + return agentInfo, nil } - if err := updateID(agentinfo, diskStore); err != nil { - return nil, err + if err := updateID(agentInfo, diskStore); err != nil { + return nil, fmt.Errorf("could not update agent ID on disk store: %w", err) } - return agentinfo, nil + return agentInfo, nil } func updateID(agentInfo *persistentAgentInfo, s ioStore) error { diff --git a/internal/pkg/agent/application/info/agent_metadata.go b/internal/pkg/agent/application/info/agent_metadata.go index a532487a446..49afeca9dc7 100644 --- a/internal/pkg/agent/application/info/agent_metadata.go +++ b/internal/pkg/agent/application/info/agent_metadata.go @@ -10,10 +10,11 @@ import ( "runtime" "strings" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/go-sysinfo" "github.com/elastic/go-sysinfo/types" + + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/release" ) // ECSMeta is a collection of agent related metadata in ECS compliant object form. 
@@ -123,7 +124,7 @@ const ( func Metadata() (*ECSMeta, error) { agentInfo, err := NewAgentInfo(false) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create new agent info: %w", err) } meta, err := agentInfo.ECSMetadata() diff --git a/internal/pkg/agent/application/paths/paths_linux.go b/internal/pkg/agent/application/paths/paths_linux.go index 22faeb5f75a..37cc57c33af 100644 --- a/internal/pkg/agent/application/paths/paths_linux.go +++ b/internal/pkg/agent/application/paths/paths_linux.go @@ -14,5 +14,5 @@ const defaultAgentVaultPath = "vault" // AgentVaultPath is the directory that contains all the files for the value func AgentVaultPath() string { - return filepath.Join(Home(), defaultAgentVaultPath) + return filepath.Join(Config(), defaultAgentVaultPath) } diff --git a/internal/pkg/agent/application/paths/paths_windows.go b/internal/pkg/agent/application/paths/paths_windows.go index 2fc6fd008a0..0b81aa2061b 100644 --- a/internal/pkg/agent/application/paths/paths_windows.go +++ b/internal/pkg/agent/application/paths/paths_windows.go @@ -42,5 +42,5 @@ func ArePathsEqual(expected, actual string) bool { // AgentVaultPath is the directory that contains all the files for the value func AgentVaultPath() string { - return filepath.Join(Home(), defaultAgentVaultPath) + return filepath.Join(Config(), defaultAgentVaultPath) } diff --git a/internal/pkg/agent/application/secret/secret.go b/internal/pkg/agent/application/secret/secret.go index edce9eda174..bd2ee546454 100644 --- a/internal/pkg/agent/application/secret/secret.go +++ b/internal/pkg/agent/application/secret/secret.go @@ -2,10 +2,12 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +// Package secret manages application secrets. package secret import ( "encoding/json" + "fmt" "runtime" "sync" "time" @@ -52,7 +54,7 @@ func Create(key string, opts ...OptionFunc) error { options := applyOptions(opts...) v, err := vault.New(options.vaultPath) if err != nil { - return err + return fmt.Errorf("could not create new vault: %w", err) } defer v.Close() @@ -80,12 +82,7 @@ func Create(key string, opts ...OptionFunc) error { CreatedOn: time.Now().UTC(), } - b, err := json.Marshal(secret) - if err != nil { - return err - } - - return v.Set(key, b) + return set(v, key, secret) } // GetAgentSecret read the agent secret from the vault @@ -93,10 +90,17 @@ func GetAgentSecret(opts ...OptionFunc) (secret Secret, err error) { return Get(agentSecretKey, opts...) } +// SetAgentSecret saves the agent secret from the vault +// This is needed for migration from 8.3.0-8.3.2 to higher versions +func SetAgentSecret(secret Secret, opts ...OptionFunc) error { + return Set(agentSecretKey, secret, opts...) +} + // Get reads the secret key from the vault func Get(key string, opts ...OptionFunc) (secret Secret, err error) { options := applyOptions(opts...) - v, err := vault.New(options.vaultPath) + // open vault readonly, will not create the vault directory or the seed it was not created before + v, err := vault.New(options.vaultPath, vault.WithReadonly(true)) if err != nil { return secret, err } @@ -111,12 +115,32 @@ func Get(key string, opts ...OptionFunc) (secret Secret, err error) { return secret, err } +// Set saves the secret key to the vault +func Set(key string, secret Secret, opts ...OptionFunc) error { + options := applyOptions(opts...) 
+ v, err := vault.New(options.vaultPath) + if err != nil { + return fmt.Errorf("could not create new vault: %w", err) + } + defer v.Close() + return set(v, key, secret) +} + +func set(v *vault.Vault, key string, secret Secret) error { + b, err := json.Marshal(secret) + if err != nil { + return fmt.Errorf("could not marshal secret: %w", err) + } + + return v.Set(key, b) +} + // Remove removes the secret key from the vault func Remove(key string, opts ...OptionFunc) error { options := applyOptions(opts...) v, err := vault.New(options.vaultPath) if err != nil { - return err + return fmt.Errorf("could not create new vault: %w", err) } defer v.Close() diff --git a/internal/pkg/agent/application/upgrade/artifact/config.go b/internal/pkg/agent/application/upgrade/artifact/config.go index c190c02d239..6db38fa612c 100644 --- a/internal/pkg/agent/application/upgrade/artifact/config.go +++ b/internal/pkg/agent/application/upgrade/artifact/config.go @@ -17,6 +17,9 @@ const ( darwin = "darwin" linux = "linux" windows = "windows" + + // DefaultSourceURI is the default source URI for downloading artifacts. + DefaultSourceURI = "https://artifacts.elastic.co/downloads/" ) // Config is a configuration used for verifier and downloader @@ -56,7 +59,7 @@ func DefaultConfig() *Config { transport.Timeout = 10 * time.Minute return &Config{ - SourceURI: "https://artifacts.elastic.co/downloads/", + SourceURI: DefaultSourceURI, TargetDirectory: paths.Downloads(), InstallPath: paths.Install(), HTTPTransportSettings: transport, diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go index 5a6762b40fc..11784e2d0f5 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go @@ -65,9 +65,9 @@ func TestDownloadBodyError(t *testing.T) { } require.GreaterOrEqual(t, len(log.info), 1, "download error not logged at info level") - assert.Equal(t, log.info[len(log.info)-1].record, "download from %s failed at %s @ %sps: %s") + assert.True(t, containsMessage(log.info, "download from %s failed at %s @ %sps: %s")) require.GreaterOrEqual(t, len(log.warn), 1, "download error not logged at warn level") - assert.Equal(t, log.warn[len(log.warn)-1].record, "download from %s failed at %s @ %sps: %s") + assert.True(t, containsMessage(log.warn, "download from %s failed at %s @ %sps: %s")) } func TestDownloadLogProgressWithLength(t *testing.T) { @@ -208,3 +208,12 @@ func (f *recordLogger) Warnf(record string, args ...interface{}) { defer f.lock.Unlock() f.warn = append(f.warn, logMessage{record, args}) } + +func containsMessage(logs []logMessage, msg string) bool { + for _, item := range logs { + if item.record == msg { + return true + } + } + return false +} diff --git a/internal/pkg/agent/application/upgrade/cleanup.go b/internal/pkg/agent/application/upgrade/cleanup.go new file mode 100644 index 00000000000..5e0618dfe78 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/cleanup.go @@ -0,0 +1,36 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package upgrade + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/go-multierror" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" +) + +// preUpgradeCleanup will remove files that do not have the passed version number from the downloads directory. +func preUpgradeCleanup(version string) error { + files, err := os.ReadDir(paths.Downloads()) + if err != nil { + return fmt.Errorf("unable to read directory %q: %w", paths.Downloads(), err) + } + var rErr error + for _, file := range files { + if file.IsDir() { + continue + } + if !strings.Contains(file.Name(), version) { + if err := os.Remove(filepath.Join(paths.Downloads(), file.Name())); err != nil { + rErr = multierror.Append(rErr, fmt.Errorf("unable to remove file %q: %w", filepath.Join(paths.Downloads(), file.Name()), err)) + } + } + } + return rErr +} diff --git a/internal/pkg/agent/application/upgrade/cleanup_test.go b/internal/pkg/agent/application/upgrade/cleanup_test.go new file mode 100644 index 00000000000..736a9c42b3d --- /dev/null +++ b/internal/pkg/agent/application/upgrade/cleanup_test.go @@ -0,0 +1,44 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package upgrade + +import ( + "os" + "path/filepath" + "testing" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + + "github.com/stretchr/testify/require" +) + +func setupDir(t *testing.T) { + t.Helper() + dir := t.TempDir() + paths.SetDownloads(dir) + + err := os.WriteFile(filepath.Join(dir, "test-8.3.0-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(dir, "test-8.4.0-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(dir, "test-8.5.0-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(dir, "test-hash-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) +} + +func TestPreUpgradeCleanup(t *testing.T) { + setupDir(t) + err := preUpgradeCleanup("8.4.0") + require.NoError(t, err) + + files, err := os.ReadDir(paths.Downloads()) + require.NoError(t, err) + require.Len(t, files, 1) + require.Equal(t, "test-8.4.0-file", files[0].Name()) + p, err := os.ReadFile(filepath.Join(paths.Downloads(), files[0].Name())) + require.NoError(t, err) + require.Equal(t, []byte("hello, world!"), p) +} diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index 51b0adbb184..7757ff6a9a1 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -38,6 +38,58 @@ type UpdateMarker struct { Action *fleetapi.ActionUpgrade `json:"action" yaml:"action"` } +// MarkerActionUpgrade adapter struct compatible with pre 8.3 version of the marker file format +type MarkerActionUpgrade struct { + ActionID string `yaml:"id"` + ActionType string `yaml:"type"` + Version string `yaml:"version"` + SourceURI string `yaml:"source_uri,omitempty"` +} + +func convertToMarkerAction(a *fleetapi.ActionUpgrade) *MarkerActionUpgrade { + if a == nil { + return nil + } + return &MarkerActionUpgrade{ + ActionID: a.ActionID, + ActionType: a.ActionType, + Version: a.Version, + SourceURI: a.SourceURI, + } +} + +func convertToActionUpgrade(a *MarkerActionUpgrade) 
*fleetapi.ActionUpgrade { + if a == nil { + return nil + } + return &fleetapi.ActionUpgrade{ + ActionID: a.ActionID, + ActionType: a.ActionType, + Version: a.Version, + SourceURI: a.SourceURI, + } +} + +type updateMarkerSerializer struct { + Hash string `yaml:"hash"` + UpdatedOn time.Time `yaml:"updated_on"` + PrevVersion string `yaml:"prev_version"` + PrevHash string `yaml:"prev_hash"` + Acked bool `yaml:"acked"` + Action *MarkerActionUpgrade `yaml:"action"` +} + +func newMarkerSerializer(m *UpdateMarker) *updateMarkerSerializer { + return &updateMarkerSerializer{ + Hash: m.Hash, + UpdatedOn: m.UpdatedOn, + PrevVersion: m.PrevVersion, + PrevHash: m.PrevHash, + Acked: m.Acked, + Action: convertToMarkerAction(m.Action), + } +} + // markUpgrade marks update happened so we can handle grace period func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi.ActionUpgrade) error { prevVersion := release.Version() @@ -46,7 +98,7 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi. prevHash = prevHash[:hashLen] } - marker := UpdateMarker{ + marker := &UpdateMarker{ Hash: hash, UpdatedOn: time.Now(), PrevVersion: prevVersion, @@ -54,7 +106,7 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi. Action: action, } - markerBytes, err := yaml.Marshal(marker) + markerBytes, err := yaml.Marshal(newMarkerSerializer(marker)) if err != nil { return errors.New(err, errors.TypeConfig, "failed to parse marker file") } @@ -103,16 +155,31 @@ func LoadMarker() (*UpdateMarker, error) { return nil, err } - marker := &UpdateMarker{} + marker := &updateMarkerSerializer{} if err := yaml.Unmarshal(markerBytes, &marker); err != nil { return nil, err } - return marker, nil + return &UpdateMarker{ + Hash: marker.Hash, + UpdatedOn: marker.UpdatedOn, + PrevVersion: marker.PrevVersion, + PrevHash: marker.PrevHash, + Acked: marker.Acked, + Action: convertToActionUpgrade(marker.Action), + }, nil } func saveMarker(marker *UpdateMarker) error { - markerBytes, err := yaml.Marshal(marker) + makerSerializer := &updateMarkerSerializer{ + Hash: marker.Hash, + UpdatedOn: marker.UpdatedOn, + PrevVersion: marker.PrevVersion, + PrevHash: marker.PrevHash, + Acked: marker.Acked, + Action: convertToMarkerAction(marker.Action), + } + markerBytes, err := yaml.Marshal(makerSerializer) if err != nil { return err } diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 444927a6052..edc70c3f5c0 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -5,25 +5,23 @@ package upgrade import ( - "bytes" "context" "fmt" "io/ioutil" "os" "path/filepath" - "runtime" "strings" + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/otiai10/copy" "go.elastic.co/apm" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/release" @@ 
-34,12 +32,11 @@ const ( agentName = "elastic-agent" hashLen = 6 agentCommitFile = ".elastic-agent.active.commit" - darwin = "darwin" ) var ( agentArtifact = artifact.Artifact{ - Name: "elastic-agent", + Name: "Elastic Agent", Cmd: agentName, Artifact: "beats/" + agentName, } @@ -54,6 +51,7 @@ var ( type Upgrader struct { log *logger.Logger settings *artifact.Config + agentInfo *info.AgentInfo upgradeable bool } @@ -65,28 +63,72 @@ func IsUpgradeable() bool { } // NewUpgrader creates an upgrader which is capable of performing upgrade operation -func NewUpgrader(log *logger.Logger, settings *artifact.Config) *Upgrader { +func NewUpgrader(log *logger.Logger, settings *artifact.Config, agentInfo *info.AgentInfo) *Upgrader { return &Upgrader{ log: log, settings: settings, + agentInfo: agentInfo, upgradeable: IsUpgradeable(), } } +// Reload reloads the artifact configuration for the upgrader. +func (u *Upgrader) Reload(rawConfig *config.Config) error { + type reloadConfig struct { + // SourceURI: source of the artifacts, e.g https://artifacts.elastic.co/downloads/ + SourceURI string `json:"agent.download.sourceURI" config:"agent.download.sourceURI"` + + // FleetSourceURI: source of the artifacts, e.g https://artifacts.elastic.co/downloads/ coming from fleet which uses + // different naming. + FleetSourceURI string `json:"agent.download.source_uri" config:"agent.download.source_uri"` + } + cfg := &reloadConfig{} + if err := rawConfig.Unpack(&cfg); err != nil { + return errors.New(err, "failed to unpack config during reload") + } + + var newSourceURI string + if cfg.FleetSourceURI != "" { + // fleet configuration takes precedence + newSourceURI = cfg.FleetSourceURI + } else if cfg.SourceURI != "" { + newSourceURI = cfg.SourceURI + } + + if newSourceURI != "" { + u.log.Infof("Source URI changed from %q to %q", u.settings.SourceURI, newSourceURI) + u.settings.SourceURI = newSourceURI + } else { + // source uri unset, reset to default + u.log.Infof("Source URI reset from %q to %q", u.settings.SourceURI, artifact.DefaultSourceURI) + u.settings.SourceURI = artifact.DefaultSourceURI + } + return nil +} + // Upgradeable returns true if the Elastic Agent can be upgraded. func (u *Upgrader) Upgradeable() bool { return u.upgradeable } -// Upgrade upgrades running agent, function returns shutdown callback if some needs to be executed for cases when -// reexec is called by caller. +// Upgrade upgrades running agent, function returns shutdown callback that must be called by reexec. func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) { span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() + err = preUpgradeCleanup(u.agentInfo.Version()) + if err != nil { + u.log.Errorf("Unable to clean downloads dir %q before update: %v", paths.Downloads(), err) + } + sourceURI = u.sourceURI(sourceURI) archivePath, err := u.downloadArtifact(ctx, version, sourceURI) if err != nil { + // Run the same preUpgradeCleanup task to get rid of any newly downloaded files + // This may have an issue if users are upgrading to the same version number. 
+ if dErr := preUpgradeCleanup(u.agentInfo.Version()); dErr != nil { + u.log.Errorf("Unable to remove file after verification failure: %v", dErr) + } return nil, err } @@ -103,19 +145,10 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string return nil, ErrSameVersion } - // Copy vault directory for linux/windows only - if err := copyVault(newHash); err != nil { - return nil, errors.New(err, "failed to copy vault") - } - if err := copyActionStore(newHash); err != nil { return nil, errors.New(err, "failed to copy action store") } - if err := encryptConfigIfNeeded(u.log, newHash); err != nil { - return nil, errors.New(err, "failed to encrypt the configuration") - } - if err := ChangeSymlink(ctx, newHash); err != nil { rollbackInstall(ctx, newHash) return nil, err @@ -132,6 +165,13 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string } cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, release.TrimCommit(newHash)) + + // Clean everything from the downloads dir + err = os.RemoveAll(paths.Downloads()) + if err != nil { + u.log.Errorf("Unable to clean downloads dir %q after update: %v", paths.Downloads(), err) + } + return cb, nil } @@ -158,6 +198,8 @@ func (u *Upgrader) Ack(ctx context.Context, acker acker.Acker) error { return err } + marker.Acked = true + return saveMarker(marker) } @@ -199,103 +241,6 @@ func copyActionStore(newHash string) error { return nil } -func getVaultPath(newHash string) string { - vaultPath := paths.AgentVaultPath() - if runtime.GOOS == darwin { - return vaultPath - } - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) - return filepath.Join(newHome, filepath.Base(vaultPath)) -} - -// Copies the vault files for windows and linux -func copyVault(newHash string) error { - // No vault files to copy on darwin - if runtime.GOOS == darwin { - return nil - } - - vaultPath := paths.AgentVaultPath() - newVaultPath := getVaultPath(newHash) - - err := copyDir(vaultPath, newVaultPath) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - return nil -} - -// Create the key if it doesn't exist and encrypt the fleet.yml and state.yml -func encryptConfigIfNeeded(log *logger.Logger, newHash string) (err error) { - vaultPath := getVaultPath(newHash) - - err = secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) - if err != nil { - return err - } - - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) - ymlStateStorePath := filepath.Join(newHome, filepath.Base(paths.AgentStateStoreYmlFile())) - stateStorePath := filepath.Join(newHome, filepath.Base(paths.AgentStateStoreFile())) - - files := []struct { - Src string - Dst string - }{ - { - Src: ymlStateStorePath, - Dst: stateStorePath, - }, - { - Src: paths.AgentConfigYmlFile(), - Dst: paths.AgentConfigFile(), - }, - } - for _, f := range files { - var b []byte - b, err = ioutil.ReadFile(f.Src) - if err != nil { - if os.IsNotExist(err) { - continue - } - return err - } - - // Encrypt yml file - store := storage.NewEncryptedDiskStore(f.Dst, storage.WithVaultPath(vaultPath)) - err = store.Save(bytes.NewReader(b)) - if err != nil { - return err - } - - // Remove yml file if no errors - defer func(fp string) { - if err != nil { - return - } - if rerr := os.Remove(fp); rerr != nil { - log.Warnf("failed to remove file: %s, err: %v", fp, rerr) - } - }(f.Src) - } - - // Do not remove AgentConfigYmlFile lock file if any error happened. 
- if err != nil { - return err - } - - lockFp := paths.AgentConfigYmlFile() + ".lock" - if rerr := os.Remove(lockFp); rerr != nil { - log.Warnf("failed to remove file: %s, err: %v", lockFp, rerr) - } - - return err -} - // shutdownCallback returns a callback function to be executing during shutdown once all processes are closed. // this goes through runtime directory of agent and copies all the state files created by processes to new versioned // home directory with updated process name to match new version. diff --git a/internal/pkg/agent/cleaner/cleaner.go b/internal/pkg/agent/cleaner/cleaner.go new file mode 100644 index 00000000000..856ae020b89 --- /dev/null +++ b/internal/pkg/agent/cleaner/cleaner.go @@ -0,0 +1,111 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cleaner + +import ( + "context" + "os" + "sync" + "time" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/fileutil" +) + +// Wait interval. +// If the watchFile was not modified after this interval, then remove all the files in the removeFiles array +const defaultCleanWait = 15 * time.Minute + +type Cleaner struct { + log *logp.Logger + watchFile string + removeFiles []string + cleanWait time.Duration + + mx sync.Mutex +} + +type OptionFunc func(c *Cleaner) + +func New(log *logp.Logger, watchFile string, removeFiles []string, opts ...OptionFunc) *Cleaner { + c := &Cleaner{ + log: log, + watchFile: watchFile, + removeFiles: removeFiles, + cleanWait: defaultCleanWait, + } + + for _, opt := range opts { + opt(c) + } + return c +} + +func WithCleanWait(cleanWait time.Duration) OptionFunc { + return func(c *Cleaner) { + c.cleanWait = cleanWait + } +} + +func (c *Cleaner) Run(ctx context.Context) error { + wait, done, err := c.process() + if err != nil { + return err + } + + if done { + return nil + } + + t := time.NewTimer(wait) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return nil + case <-t.C: + c.log.Debug("cleaner: timer triggered") + wait, done, err = c.process() + if err != nil { + return err + } + + if done { + return nil + } + t.Reset(wait) + } + } +} + +func (c *Cleaner) process() (wait time.Duration, done bool, err error) { + modTime, err := fileutil.GetModTime(c.watchFile) + if err != nil { + return + } + + c.log.Debugf("cleaner: check file %s mod time: %v", c.watchFile, modTime) + curDur := time.Since(modTime) + if curDur > c.cleanWait { + c.log.Debugf("cleaner: file %s modification expired", c.watchFile) + c.deleteFiles() + return wait, true, nil + } + wait = c.cleanWait - curDur + return wait, false, nil +} + +func (c *Cleaner) deleteFiles() { + c.log.Debugf("cleaner: delete files: %v", c.removeFiles) + c.mx.Lock() + defer c.mx.Unlock() + for _, fp := range c.removeFiles { + c.log.Debugf("cleaner: delete file: %v", fp) + err := os.Remove(fp) + if err != nil { + c.log.Warnf("cleaner: delete file %v failed: %v", fp, err) + } + } +} diff --git a/internal/pkg/agent/cleaner/cleaner_test.go b/internal/pkg/agent/cleaner/cleaner_test.go new file mode 100644 index 00000000000..cf189b784d3 --- /dev/null +++ b/internal/pkg/agent/cleaner/cleaner_test.go @@ -0,0 +1,68 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cleaner + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/elastic/elastic-agent-libs/logp" +) + +func TestCleaner(t *testing.T) { + // Setup + const watchFileName = "fleet.enc" + removeFiles := []string{"fleet.yml", "fleet.yml.lock"} + + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + dir := t.TempDir() + watchFilePath := filepath.Join(dir, watchFileName) + + removeFilePaths := make([]string, len(removeFiles)) + + checkDir(t, dir, 0) + + // Create files + err := ioutil.WriteFile(watchFilePath, []byte{}, 0600) + if err != nil { + t.Fatal(err) + } + + for i, fn := range removeFiles { + removeFilePaths[i] = filepath.Join(dir, fn) + err := ioutil.WriteFile(removeFilePaths[i], []byte{}, 0600) + if err != nil { + t.Fatal(err) + } + } + + checkDir(t, dir, len(removeFiles)+1) + + log := logp.NewLogger("dynamic") + cleaner := New(log, watchFilePath, removeFilePaths, WithCleanWait(500*time.Millisecond)) + err = cleaner.Run(ctx) + if err != nil { + t.Fatal(err) + } + checkDir(t, dir, 1) +} + +func checkDir(t *testing.T, dir string, expectedCount int) { + t.Helper() + entries, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(entries) != expectedCount { + t.Fatalf("Dir %s expected %d entries, found %d", dir, expectedCount, len(entries)) + } +} diff --git a/internal/pkg/agent/cmd/diagnostics_test.go b/internal/pkg/agent/cmd/diagnostics_test.go index cec6a6f3450..99d98ef78de 100644 --- a/internal/pkg/agent/cmd/diagnostics_test.go +++ b/internal/pkg/agent/cmd/diagnostics_test.go @@ -17,10 +17,12 @@ import ( "testing" "time" + "github.com/elastic/elastic-agent-libs/transport/tlscommon" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/program" ) var testDiagnostics = DiagnosticsInfo{ @@ -31,7 +33,7 @@ var testDiagnostics = DiagnosticsInfo{ BuildTime: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), Snapshot: false, }, - ProcMeta: []client.ProcMeta{client.ProcMeta{ + ProcMeta: []client.ProcMeta{{ Process: "filebeat", Name: "filebeat", Hostname: "test-host", @@ -46,7 +48,7 @@ var testDiagnostics = DiagnosticsInfo{ BinaryArchitecture: "test-architecture", RouteKey: "test", ElasticLicensed: true, - }, client.ProcMeta{ + }, { Process: "filebeat", Name: "filebeat_monitoring", Hostname: "test-host", @@ -61,7 +63,7 @@ var testDiagnostics = DiagnosticsInfo{ BinaryArchitecture: "test-architecture", RouteKey: "test", ElasticLicensed: true, - }, client.ProcMeta{ + }, { Name: "metricbeat", RouteKey: "test", Error: "failed to get metricbeat data", @@ -138,4 +140,77 @@ func Test_collectEndpointSecurityLogs_noEndpointSecurity(t *testing.T) { err := collectEndpointSecurityLogs(zw, specs) assert.NoError(t, err, "collectEndpointSecurityLogs should not return an error") } + +func Test_redact(t *testing.T) { + tests := []struct { + name string + arg interface{} + wantRedacted []string + wantErr assert.ErrorAssertionFunc + }{ + { + name: "tlscommon.Config", + arg: tlscommon.Config{ + Enabled: nil, + VerificationMode: 0, + Versions: nil, + CipherSuites: nil, + CAs: []string{"ca1", "ca2"}, + Certificate: tlscommon.CertificateConfig{ + Certificate: "Certificate", + Key: "Key", + Passphrase: "Passphrase", + }, + CurveTypes: nil, + Renegotiation: 0, + CASha256: nil, + 
CATrustedFingerprint: "", + }, + wantRedacted: []string{ + "certificate", "key", "key_passphrase", "certificate_authorities"}, + }, + { + name: "some map", + arg: map[string]interface{}{ + "s": "sss", + "some_key": "hey, a key!", + "a_password": "changeme", + "my_token": "a_token", + "nested": map[string]string{ + "4242": "4242", + "4242key": "4242key", + "4242password": "4242password", + "4242certificate": "4242certificate", + }, + }, + wantRedacted: []string{ + "some_key", "a_password", "my_token", "4242key", "4242password", "4242certificate"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := redact(tt.arg) + require.NoError(t, err) + + for k, v := range got { + if contains(tt.wantRedacted, k) { + assert.Equal(t, v, REDACTED) + } else { + assert.NotEqual(t, v, REDACTED) + } + } + }) + } +} + +func contains(list []string, val string) bool { + for _, k := range list { + if val == k { + return true + } + } + + return false +} */ diff --git a/internal/pkg/agent/install/svc.go b/internal/pkg/agent/install/svc.go index 7148f4acca0..1a3bf50c896 100644 --- a/internal/pkg/agent/install/svc.go +++ b/internal/pkg/agent/install/svc.go @@ -6,6 +6,7 @@ package install import ( "path/filepath" + "runtime" "github.com/kardianos/service" @@ -18,6 +19,12 @@ const ( // ServiceDescription is the description for the service. ServiceDescription = "Elastic Agent is a unified agent to observe, monitor and protect your system." + + // Set the launch daemon ExitTimeOut to 60 seconds in order to allow the agent to shutdown gracefully + // At the moment the version 8.3 & 8.4 of the agent are taking about 11 secs to shutdown + // and the launchd sends SIGKILL after 5 secs which causes the beats processes to be left running orphaned + // depending on the shutdown timing. + darwinServiceExitTimeout = 60 ) // ExecutablePath returns the path for the installed Agents executable. @@ -30,7 +37,7 @@ func ExecutablePath() string { } func newService() (service.Service, error) { - return service.New(nil, &service.Config{ + cfg := &service.Config{ Name: paths.ServiceName, DisplayName: ServiceDisplayName, Description: ServiceDescription, @@ -45,5 +52,57 @@ func newService() (service.Service, error) { "OnFailureDelayDuration": "1s", "OnFailureResetPeriod": 10, }, - }) + } + + if runtime.GOOS == "darwin" { + // The github.com/kardianos/service library doesn't support ExitTimeOut in their prebuilt template. 
+ // This option allows to pass our own template for the launch daemon plist, which is a copy + // of the prebuilt template with added ExitTimeOut option + cfg.Option["LaunchdConfig"] = darwinLaunchdConfig + cfg.Option["ExitTimeOut"] = darwinServiceExitTimeout + } + + return service.New(nil, cfg) } + +// A copy of the launchd plist template from github.com/kardianos/service +// with added .Config.Option.ExitTimeOut option +const darwinLaunchdConfig = ` + + + + Label + {{html .Name}} + ProgramArguments + + {{html .Path}} + {{range .Config.Arguments}} + {{html .}} + {{end}} + + {{if .UserName}}UserName + {{html .UserName}}{{end}} + {{if .ChRoot}}RootDirectory + {{html .ChRoot}}{{end}} + {{if .Config.Option.ExitTimeOut}}ExitTimeOut + {{html .Config.Option.ExitTimeOut}}{{end}} + {{if .WorkingDirectory}}WorkingDirectory + {{html .WorkingDirectory}}{{end}} + SessionCreate + <{{bool .SessionCreate}}/> + KeepAlive + <{{bool .KeepAlive}}/> + RunAtLoad + <{{bool .RunAtLoad}}/> + Disabled + + + StandardOutPath + /usr/local/var/log/{{html .Name}}.out.log + StandardErrorPath + /usr/local/var/log/{{html .Name}}.err.log + + + +` diff --git a/internal/pkg/agent/migration/migrate_secret.go b/internal/pkg/agent/migration/migrate_secret.go new file mode 100644 index 00000000000..08cfc3e5eb1 --- /dev/null +++ b/internal/pkg/agent/migration/migrate_secret.go @@ -0,0 +1,163 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package migration + +import ( + "errors" + "fmt" + "io" + "io/fs" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" + "github.com/elastic/elastic-agent/internal/pkg/agent/storage" + "github.com/elastic/elastic-agent/internal/pkg/fileutil" +) + +const ( + darwin = "darwin" +) + +// MigrateAgentSecret migrates agent secret if the secret doesn't exists agent upgrade from 8.3.0 - 8.3.2 to 8.x and above on Linux and Windows platforms. 
+func MigrateAgentSecret(log *logp.Logger) error { + // Nothing to migrate for darwin + if runtime.GOOS == darwin { + return nil + } + + // Check if the secret already exists + log.Debug("migrate agent secret, check if secret already exists") + _, err := secret.GetAgentSecret() + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // The secret doesn't exists, perform migration below + log.Debug("agent secret doesn't exists, perform migration") + } else { + err = fmt.Errorf("failed read the agent secret: %w", err) + log.Error(err) + return err + } + } else { + // The secret already exists, nothing to migrate + log.Debug("secret already exists nothing to migrate") + return nil + } + + // Check if the secret was copied by the fleet upgrade handler to the legacy location + log.Debug("check if secret was copied over by 8.3.0-8.3.2 version of the agent") + sec, err := getAgentSecretFromHomePath(paths.Home()) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // The secret is not found in this instance of the vault, continue with migration + log.Debug("agent secret copied from 8.3.0-8.3.2 doesn't exists, continue with migration") + } else { + err = fmt.Errorf("failed agent 8.3.0-8.3.2 secret check: %w", err) + log.Error(err) + return err + } + } else { + // The secret is found, save in the new agent vault + log.Debug("agent secret from 8.3.0-8.3.2 is found, migrate to the new vault") + return secret.SetAgentSecret(sec) + } + + // Scan other agent data directories, find the latest agent secret + log.Debug("search for possible latest agent 8.3.0-8.3.2 secret") + dataDir := paths.Data() + + sec, err = findPreviousAgentSecret(dataDir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // The secret is not found + log.Debug("no previous agent 8.3.0-8.3.2 secrets found, nothing to migrate") + return nil + } + err = fmt.Errorf("search for possible latest agent 8.3.0-8.3.2 secret failed: %w", err) + log.Error(err) + return err + } + log.Debug("found previous agent 8.3.0-8.3.2 secret, migrate to the new vault") + return secret.SetAgentSecret(sec) +} + +func findPreviousAgentSecret(dataDir string) (secret.Secret, error) { + found := false + var sec secret.Secret + fileSystem := os.DirFS(dataDir) + _ = fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + if strings.HasPrefix(d.Name(), "elastic-agent-") { + vaultPath := getLegacyVaultPathFromPath(filepath.Join(dataDir, path)) + s, err := secret.GetAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + // Ignore if fs.ErrNotExist error, keep scanning + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err + } + + // Check that the configuration can be decrypted with the found agent secret + exists, _ := fileutil.FileExists(paths.AgentConfigFile()) + if exists { + store := storage.NewEncryptedDiskStore(paths.AgentConfigFile(), storage.WithVaultPath(vaultPath)) + r, err := store.Load() + if err != nil { + //nolint:nilerr // ignore the error keep scanning + return nil + } + + defer r.Close() + _, err = ioutil.ReadAll(r) + if err != nil { + //nolint:nilerr // ignore the error keep scanning + return nil + } + + sec = s + found = true + return io.EOF + } + } else if d.Name() != "." 
{ + return fs.SkipDir + } + } + return nil + }) + if !found { + return sec, fs.ErrNotExist + } + return sec, nil +} + +func getAgentSecretFromHomePath(homePath string) (sec secret.Secret, err error) { + vaultPath := getLegacyVaultPathFromPath(homePath) + fi, err := os.Stat(vaultPath) + if err != nil { + return + } + + if !fi.IsDir() { + return sec, fs.ErrNotExist + } + return secret.GetAgentSecret(secret.WithVaultPath(vaultPath)) +} + +func getLegacyVaultPath() string { + return getLegacyVaultPathFromPath(paths.Home()) +} + +func getLegacyVaultPathFromPath(path string) string { + return filepath.Join(path, "vault") +} diff --git a/internal/pkg/agent/migration/migrate_secret_test.go b/internal/pkg/agent/migration/migrate_secret_test.go new file mode 100644 index 00000000000..c6dfeb1781c --- /dev/null +++ b/internal/pkg/agent/migration/migrate_secret_test.go @@ -0,0 +1,387 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build linux || windows +// +build linux windows + +package migration + +import ( + "errors" + "io/fs" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/gofrs/uuid" + "github.com/google/go-cmp/cmp" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" + "github.com/elastic/elastic-agent/internal/pkg/agent/storage" + "github.com/elastic/elastic-agent/internal/pkg/agent/vault" +) + +func TestFindAgentSecretFromHomePath(t *testing.T) { + + tests := []struct { + name string + setupFn func(homePath string) error + wantErr error + }{ + { + name: "no data dir", + wantErr: fs.ErrNotExist, + }, + { + name: "no vault dir", + setupFn: func(homePath string) error { + return os.MkdirAll(homePath, 0750) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault file instead of directory", + setupFn: func(homePath string) error { + err := os.MkdirAll(homePath, 0750) + if err != nil { + return err + } + return ioutil.WriteFile(getLegacyVaultPathFromPath(homePath), []byte{}, 0600) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "empty vault directory", + setupFn: func(homePath string) error { + return os.MkdirAll(getLegacyVaultPathFromPath(homePath), 0750) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "empty vault", + setupFn: func(homePath string) error { + v, err := vault.New(getLegacyVaultPathFromPath(homePath)) + if err != nil { + return err + } + defer v.Close() + return nil + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault dir with no seed", + setupFn: func(homePath string) error { + vaultPath := getLegacyVaultPathFromPath(homePath) + v, err := vault.New(vaultPath) + if err != nil { + return err + } + defer v.Close() + return os.Remove(filepath.Join(vaultPath, ".seed")) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault with secret and misplaced seed vault", + setupFn: func(homePath string) error { + vaultPath := getLegacyVaultPathFromPath(homePath) + err := secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + return err + } + return os.Remove(filepath.Join(vaultPath, ".seed")) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault with valid secret", + setupFn: func(homePath string) error { + vaultPath := getLegacyVaultPathFromPath(homePath) + err := 
secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + return err + } + return generateTestConfig(vaultPath) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + homePath := paths.Home() + + if tc.setupFn != nil { + if err := tc.setupFn(homePath); err != nil { + t.Fatal(err) + } + } + + sec, err := getAgentSecretFromHomePath(homePath) + if !errors.Is(err, tc.wantErr) { + t.Fatalf("want err: %v, got err: %v", tc.wantErr, err) + } + + foundSec, err := findPreviousAgentSecret(filepath.Dir(homePath)) + if !errors.Is(err, tc.wantErr) { + t.Fatalf("want err: %v, got err: %v", tc.wantErr, err) + } + diff := cmp.Diff(sec, foundSec) + if diff != "" { + t.Fatal(diff) + } + + }) + } +} + +type configType int + +const ( + NoConfig configType = iota + MatchingConfig + NonMatchingConfig +) + +func TestFindNewestAgentSecret(t *testing.T) { + + tests := []struct { + name string + cfgType configType + wantErr error + }{ + { + name: "missing config", + cfgType: NoConfig, + wantErr: fs.ErrNotExist, + }, + { + name: "matching config", + cfgType: MatchingConfig, + }, + { + name: "non-matching config", + cfgType: NonMatchingConfig, + wantErr: fs.ErrNotExist, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + paths.SetConfig(top) + dataDir := paths.Data() + + wantSecret, err := generateTestSecrets(dataDir, 3, tc.cfgType) + if err != nil { + t.Fatal(err) + } + sec, err := findPreviousAgentSecret(dataDir) + + if !errors.Is(err, tc.wantErr) { + t.Fatalf("want err: %v, got err: %v", tc.wantErr, err) + } + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } + }) + } +} + +func TestMigrateAgentSecret(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + paths.SetConfig(top) + dataDir := paths.Data() + + // No vault home path + homePath := generateTestHomePath(dataDir) + if err := os.MkdirAll(homePath, 0750); err != nil { + t.Fatal(err) + } + + // Empty vault home path + homePath = generateTestHomePath(dataDir) + vaultPath := getLegacyVaultPathFromPath(homePath) + if err := os.MkdirAll(vaultPath, 0750); err != nil { + t.Fatal(err) + } + + // Vault with missing seed + homePath = generateTestHomePath(dataDir) + vaultPath = getLegacyVaultPathFromPath(homePath) + v, err := vault.New(vaultPath) + if err != nil { + t.Fatal(err) + } + defer v.Close() + + if err = os.Remove(filepath.Join(vaultPath, ".seed")); err != nil { + t.Fatal(err) + } + + // Generate few valid secrets to scan for + wantSecret, err := generateTestSecrets(dataDir, 5, MatchingConfig) + if err != nil { + t.Fatal(err) + } + + // Expect no agent secret found + _, err = secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if !errors.Is(err, fs.ErrNotExist) { + t.Fatalf("expected err: %v", fs.ErrNotExist) + } + + // Perform migration + log := logp.NewLogger("test_agent_secret") + err = MigrateAgentSecret(log) + if err != nil { + t.Fatal(err) + } + + // Expect the agent secret is migrated now + sec, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Compare the migrated secret with the expected newest one + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } +} + +func TestMigrateAgentSecretAlreadyExists(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + err := secret.CreateAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + 
} + + // Expect agent secret created + wantSecret, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Perform migration + log := logp.NewLogger("test_agent_secret") + err = MigrateAgentSecret(log) + if err != nil { + t.Fatal(err) + } + + sec, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Compare, should be the same secret + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } +} + +func TestMigrateAgentSecretFromLegacyLocation(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + paths.SetConfig(top) + vaultPath := getLegacyVaultPath() + err := secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + t.Fatal(err) + } + + // Expect agent secret created + wantSecret, err := secret.GetAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + t.Fatal(err) + } + + // Perform migration + log := logp.NewLogger("test_agent_secret") + err = MigrateAgentSecret(log) + if err != nil { + t.Fatal(err) + } + + sec, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Compare, should be the same secret + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } +} + +func generateTestHomePath(dataDir string) string { + suffix := uuid.Must(uuid.NewV4()).String()[:6] + return filepath.Join(dataDir, "elastic-agent-"+suffix) +} + +func generateTestConfig(vaultPath string) error { + fleetEncConfigFile := paths.AgentConfigFile() + store := storage.NewEncryptedDiskStore(fleetEncConfigFile, storage.WithVaultPath(vaultPath)) + return store.Save(strings.NewReader("foo")) +} + +func generateTestSecrets(dataDir string, count int, cfgType configType) (wantSecret secret.Secret, err error) { + now := time.Now() + + // Generate multiple home paths + //homePaths := make([]string, count) + for i := 0; i < count; i++ { + homePath := generateTestHomePath(dataDir) + k, err := vault.NewKey(vault.AES256) + if err != nil { + return wantSecret, err + } + + sec := secret.Secret{ + Value: k, + CreatedOn: now.Add(-time.Duration(i+1) * time.Minute), + } + + vaultPath := getLegacyVaultPathFromPath(homePath) + err = secret.SetAgentSecret(sec, secret.WithVaultPath(vaultPath)) + if err != nil { + return wantSecret, err + } + + switch cfgType { + case NoConfig: + case MatchingConfig, NonMatchingConfig: + if i == 0 { + wantSecret = sec + // Create matching encrypted config file, the content of the file doesn't matter for this test + err = generateTestConfig(vaultPath) + if err != nil { + return wantSecret, err + } + } + } + // Delete + if cfgType == NonMatchingConfig && i == 0 { + _ = os.RemoveAll(vaultPath) + wantSecret = secret.Secret{} + } + } + + return wantSecret, nil +} diff --git a/internal/pkg/agent/storage/encrypted_disk_store.go b/internal/pkg/agent/storage/encrypted_disk_store.go index 48027b3178f..be78e4235df 100644 --- a/internal/pkg/agent/storage/encrypted_disk_store.go +++ b/internal/pkg/agent/storage/encrypted_disk_store.go @@ -15,6 +15,7 @@ import ( "github.com/hectane/go-acl" "github.com/elastic/elastic-agent-libs/file" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" @@ -78,7 +79,7 @@ func (d *EncryptedDiskStore) ensureKey() error { if d.key == nil { key, err := 
secret.GetAgentSecret(secret.WithVaultPath(d.vaultPath)) if err != nil { - return err + return fmt.Errorf("could not get agent key: %w", err) } d.key = key.Value } diff --git a/internal/pkg/agent/transpiler/rules.go b/internal/pkg/agent/transpiler/rules.go index e4e466ddcd9..ca97cedd707 100644 --- a/internal/pkg/agent/transpiler/rules.go +++ b/internal/pkg/agent/transpiler/rules.go @@ -669,6 +669,42 @@ func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { namespace := datastreamNamespaceFromInputNode(inputNode) datastreamType := datastreamTypeFromInputNode(inputNode, r.Type) + var inputID *StrVal + inputIDNode, found := inputNode.Find("id") + if found { + inputID, _ = inputIDNode.Value().(*StrVal) + } + + if inputID != nil { + // get input-level processors node + processorsNode, found := inputNode.Find("processors") + if !found { + processorsNode = &Key{ + name: "processors", + value: &List{value: make([]Node, 0)}, + } + + inputMap, ok := inputNode.(*Dict) + if ok { + inputMap.value = append(inputMap.value, processorsNode) + } + } + + processorsList, ok := processorsNode.Value().(*List) + if !ok { + return errors.New("InjectStreamProcessorRule: input processors is not a list") + } + + // inject `input_id` on the input level + processorMap := &Dict{value: make([]Node, 0)} + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "@metadata"}}) + processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ + &Key{name: "input_id", value: inputID}, + }}}) + addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} + processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) + } + streamsNode, ok := inputNode.Find("streams") if !ok { continue @@ -680,6 +716,12 @@ func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { } for _, streamNode := range streamsList.value { + var streamID *StrVal + streamIDNode, ok := streamNode.Find("id") + if ok { + streamID, _ = streamIDNode.Value().(*StrVal) + } + streamMap, ok := streamNode.(*Dict) if !ok { continue @@ -722,6 +764,17 @@ func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { }}}) addFieldsMap = &Dict{value: []Node{&Key{"add_fields", processorMap}}} processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) + + if streamID != nil { + // source stream + processorMap = &Dict{value: make([]Node, 0)} + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "@metadata"}}) + processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ + &Key{name: "stream_id", value: streamID.Clone()}, + }}}) + addFieldsMap = &Dict{value: []Node{&Key{"add_fields", processorMap}}} + processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) + } } } diff --git a/internal/pkg/agent/transpiler/rules_test.go b/internal/pkg/agent/transpiler/rules_test.go index ab2df9c1bce..840e1442fde 100644 --- a/internal/pkg/agent/transpiler/rules_test.go +++ b/internal/pkg/agent/transpiler/rules_test.go @@ -165,6 +165,114 @@ inputs: }, }, + "inject stream": { + givenYAML: ` +inputs: + - name: No streams, no IDs + type: file + - name: With streams and IDs + id: input-id + type: file + data_stream.namespace: nsns + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + - name: With processors + id: input-id + type: 
file + data_stream.namespace: nsns + processors: + - add_fields: + target: some + fields: + dataset: value + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + processors: + - add_fields: + target: another + fields: + dataset: value +`, + expectedYAML: ` +inputs: + - name: No streams, no IDs + type: file + - name: With streams and IDs + id: input-id + type: file + data_stream.namespace: nsns + processors: + - add_fields: + target: "@metadata" + fields: + input_id: input-id + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + processors: + - add_fields: + target: data_stream + fields: + type: stream-type + namespace: nsns + dataset: dsds + - add_fields: + target: event + fields: + dataset: dsds + - add_fields: + target: "@metadata" + fields: + stream_id: stream-id + - name: With processors + id: input-id + type: file + data_stream.namespace: nsns + processors: + - add_fields: + target: some + fields: + dataset: value + - add_fields: + target: "@metadata" + fields: + input_id: input-id + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + processors: + - add_fields: + target: another + fields: + dataset: value + - add_fields: + target: data_stream + fields: + type: stream-type + namespace: nsns + dataset: dsds + - add_fields: + target: event + fields: + dataset: dsds + - add_fields: + target: "@metadata" + fields: + stream_id: stream-id +`, + rule: &RuleList{ + Rules: []Rule{ + InjectStreamProcessor("insert_after", "stream-type"), + }, + }, + }, + "inject agent info": { givenYAML: ` inputs: diff --git a/internal/pkg/agent/transpiler/vars.go b/internal/pkg/agent/transpiler/vars.go index 8daacf606fe..e8f06a6928b 100644 --- a/internal/pkg/agent/transpiler/vars.go +++ b/internal/pkg/agent/transpiler/vars.go @@ -14,7 +14,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/composable" ) -var varsRegex = regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'"]*)}`) +var varsRegex = regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'":\/]*)}`) // ErrNoMatch is return when the replace didn't fail, just that no vars match to perform the replace. 
var ErrNoMatch = fmt.Errorf("no matching vars") diff --git a/internal/pkg/agent/transpiler/vars_test.go b/internal/pkg/agent/transpiler/vars_test.go index 5dd6d41ec72..56e27694a33 100644 --- a/internal/pkg/agent/transpiler/vars_test.go +++ b/internal/pkg/agent/transpiler/vars_test.go @@ -17,12 +17,14 @@ import ( func TestVars_Replace(t *testing.T) { vars := mustMakeVars(map[string]interface{}{ "un-der_score": map[string]interface{}{ - "key1": "data1", - "key2": "data2", + "key1": "data1", + "key2": "data2", + "with-dash": "dash-value", "list": []string{ "array1", "array2", }, + "with/slash": "some/path", "dict": map[string]interface{}{ "key1": "value1", "key2": "value2", @@ -44,6 +46,12 @@ func TestVars_Replace(t *testing.T) { false, false, }, + { + "${un-der_score.with-dash}", + NewStrVal("dash-value"), + false, + false, + }, { "${un-der_score.missing}", NewStrVal(""), @@ -74,12 +82,24 @@ func TestVars_Replace(t *testing.T) { false, false, }, + { + `${"with:colon"}`, + NewStrVal("with:colon"), + false, + false, + }, { `${"direct"}`, NewStrVal("direct"), false, false, }, + { + `${un-der_score.missing|'with:colon'}`, + NewStrVal("with:colon"), + false, + false, + }, { `${un-der_score.}`, NewStrVal(""), @@ -149,6 +169,12 @@ func TestVars_Replace(t *testing.T) { false, false, }, + { + `${un-der_score.with/slash}`, + NewStrVal(`some/path`), + false, + false, + }, { `list inside string ${un-der_score.list} causes no match`, NewList([]Node{ diff --git a/internal/pkg/agent/vault/seed.go b/internal/pkg/agent/vault/seed.go index 698bd0f0135..773c42e7465 100644 --- a/internal/pkg/agent/vault/seed.go +++ b/internal/pkg/agent/vault/seed.go @@ -9,6 +9,8 @@ package vault import ( "errors" + "fmt" + "io/fs" "io/ioutil" "os" "path/filepath" @@ -29,6 +31,24 @@ func getSeed(path string) ([]byte, error) { mxSeed.Lock() defer mxSeed.Unlock() + b, err := ioutil.ReadFile(fp) + if err != nil { + return nil, fmt.Errorf("could not read seed file: %w", err) + } + + // return fs.ErrNotExist if an invalid number of bytes was read + if len(b) != int(AES256) { + return nil, fmt.Errorf("invalid seed length, expected: %v, got: %v: %w", int(AES256), len(b), fs.ErrNotExist) + } + return b, nil +} + +func createSeedIfNotExists(path string) ([]byte, error) { + fp := filepath.Join(path, seedFile) + + mxSeed.Lock() + defer mxSeed.Unlock() + b, err := ioutil.ReadFile(fp) if err != nil { if !errors.Is(err, os.ErrNotExist) { @@ -52,3 +72,10 @@ func getSeed(path string) ([]byte, error) { return seed, nil } + +func getOrCreateSeed(path string, readonly bool) ([]byte, error) { + if readonly { + return getSeed(path) + } + return createSeedIfNotExists(path) +} diff --git a/internal/pkg/agent/vault/seed_test.go b/internal/pkg/agent/vault/seed_test.go index bb9197ea614..d10be29634f 100644 --- a/internal/pkg/agent/vault/seed_test.go +++ b/internal/pkg/agent/vault/seed_test.go @@ -10,12 +10,14 @@ package vault import ( "context" "encoding/hex" + "io/fs" "path/filepath" "sync" "testing" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" ) @@ -24,12 +26,45 @@ func TestGetSeed(t *testing.T) { fp := filepath.Join(dir, seedFile) + require.NoFileExists(t, fp) + + // seed is not yet created + _, err := getSeed(dir) + + // should not be found + require.ErrorIs(t, err, fs.ErrNotExist) + + b, err := createSeedIfNotExists(dir) + assert.NoError(t, err) + + require.FileExists(t, fp) + + diff := cmp.Diff(int(AES256), len(b)) + if diff != "" { + t.Error(diff) + } + + //
try get seed + gotSeed, err := getSeed(dir) + assert.NoError(t, err) + + diff = cmp.Diff(b, gotSeed) + if diff != "" { + t.Error(diff) + } +} + +func TestCreateSeedIfNotExists(t *testing.T) { + dir := t.TempDir() + + fp := filepath.Join(dir, seedFile) + assert.NoFileExists(t, fp) - b, err := getSeed(dir) + b, err := createSeedIfNotExists(dir) assert.NoError(t, err) - assert.FileExists(t, fp) + require.FileExists(t, fp) diff := cmp.Diff(int(AES256), len(b)) if diff != "" { @@ -37,7 +72,7 @@ func TestGetSeed(t *testing.T) { } } -func TestGetSeedRace(t *testing.T) { +func TestCreateSeedIfNotExistsRace(t *testing.T) { var err error dir := t.TempDir() @@ -51,7 +86,7 @@ func TestGetSeedRace(t *testing.T) { for i := 0; i < count; i++ { g.Go(func(idx int) func() error { return func() error { - seed, err := getSeed(dir) + seed, err := createSeedIfNotExists(dir) mx.Lock() res[idx] = seed mx.Unlock() diff --git a/internal/pkg/agent/vault/vault_darwin.go b/internal/pkg/agent/vault/vault_darwin.go index 4119b27a586..5f63a496179 100644 --- a/internal/pkg/agent/vault/vault_darwin.go +++ b/internal/pkg/agent/vault/vault_darwin.go @@ -37,13 +37,15 @@ type Vault struct { } // New initializes the vault store -// Call Close when done to release the resouces -func New(name string) (*Vault, error) { +// Call Close when done to release the resources +func New(name string, opts ...OptionFunc) (*Vault, error) { var keychain C.SecKeychainRef + err := statusToError(C.OpenKeychain(keychain)) if err != nil { - return nil, err + return nil, fmt.Errorf("could not open keychain: %w", err) } + return &Vault{ name: name, keychain: keychain, diff --git a/internal/pkg/agent/vault/vault_linux.go b/internal/pkg/agent/vault/vault_linux.go index a3737d5c625..51f6a3fa651 100644 --- a/internal/pkg/agent/vault/vault_linux.go +++ b/internal/pkg/agent/vault/vault_linux.go @@ -11,6 +11,7 @@ import ( "crypto/rand" "crypto/sha256" "errors" + "fmt" "io/fs" "io/ioutil" "os" @@ -29,28 +30,39 @@ type Vault struct { mx sync.Mutex } -// Open initializes the vault store -func New(path string) (*Vault, error) { +// New creates the vault store +func New(path string, opts ...OptionFunc) (v *Vault, err error) { + options := applyOptions(opts...) dir := filepath.Dir(path) // If there is no specific path then get the executable directory if dir == "." { exefp, err := os.Executable() if err != nil { - return nil, err + return nil, fmt.Errorf("could not get executable path: %w", err) } dir = filepath.Dir(exefp) path = filepath.Join(dir, path) } - err := os.MkdirAll(path, 0750) - if err != nil { - return nil, err + if options.readonly { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fs.ErrNotExist + } + } else { + err := os.MkdirAll(path, 0750) + if err != nil { + return nil, fmt.Errorf("failed to create vault path: %v, err: %w", path, err) + } } - key, err := getSeed(path) + key, err := getOrCreateSeed(path, options.readonly) if err != nil { - return nil, err + return nil, fmt.Errorf("could not get seed to create new vault: %w", err) } return &Vault{ diff --git a/internal/pkg/agent/vault/vault_options.go b/internal/pkg/agent/vault/vault_options.go new file mode 100644 index 00000000000..2673ae6aa53 --- /dev/null +++ b/internal/pkg/agent/vault/vault_options.go @@ -0,0 +1,28 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements.
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package vault + +type Options struct { + readonly bool +} + +type OptionFunc func(o *Options) + +func WithReadonly(readonly bool) OptionFunc { + return func(o *Options) { + o.readonly = readonly + } +} + +//nolint:unused // not used on darwin +func applyOptions(opts ...OptionFunc) Options { + var options Options + + for _, opt := range opts { + opt(&options) + } + + return options +} diff --git a/internal/pkg/agent/vault/vault_windows.go b/internal/pkg/agent/vault/vault_windows.go index 7468fe16814..c39769cc8da 100644 --- a/internal/pkg/agent/vault/vault_windows.go +++ b/internal/pkg/agent/vault/vault_windows.go @@ -27,7 +27,8 @@ type Vault struct { } // Open initializes the vault store -func New(path string) (*Vault, error) { +func New(path string, opts ...OptionFunc) (v *Vault, err error) { + options := applyOptions(opts...) dir := filepath.Dir(path) // If there is no specific path then get the executable directory @@ -40,16 +41,26 @@ func New(path string) (*Vault, error) { path = filepath.Join(dir, path) } - err := os.MkdirAll(path, 0750) - if err != nil { - return nil, err - } - err = systemAdministratorsOnly(path, false) - if err != nil { - return nil, err + if options.readonly { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fs.ErrNotExist + } + } else { + err := os.MkdirAll(path, 0750) + if err != nil { + return nil, err + } + err = systemAdministratorsOnly(path, false) + if err != nil { + return nil, err + } } - entropy, err := getSeed(path) + entropy, err := getOrCreateSeed(path, options.readonly) if err != nil { return nil, err } diff --git a/internal/pkg/composable/providers/kubernetes/node_test.go b/internal/pkg/composable/providers/kubernetes/node_test.go index 7d8abfcea4e..ab19e7d2ce2 100644 --- a/internal/pkg/composable/providers/kubernetes/node_test.go +++ b/internal/pkg/composable/providers/kubernetes/node_test.go @@ -26,7 +26,9 @@ func TestGenerateNodeData(t *testing.T) { Name: "testnode", UID: types.UID(uid), Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "baz": "ban", @@ -54,7 +56,9 @@ func TestGenerateNodeData(t *testing.T) { "baz": "ban", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, } @@ -64,7 +68,11 @@ func TestGenerateNodeData(t *testing.T) { "name": "devcluster", "url": "8.8.8.8:9090"}, }, "kubernetes": mapstr.M{ - "labels": mapstr.M{"foo": "bar"}, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, "annotations": mapstr.M{"baz": "ban"}, "node": mapstr.M{ "ip": "node1", @@ -123,7 +131,9 @@ func (n *nodeMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOp "ip": "node1", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "baz": "ban", diff --git a/internal/pkg/composable/providers/kubernetes/pod_test.go b/internal/pkg/composable/providers/kubernetes/pod_test.go index 45fd78ac76c..95361fd2ce0 100644 --- a/internal/pkg/composable/providers/kubernetes/pod_test.go +++ b/internal/pkg/composable/providers/kubernetes/pod_test.go @@ -27,7 +27,9 @@ func TestGeneratePodData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": 
"bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "app": "production", @@ -59,7 +61,9 @@ func TestGeneratePodData(t *testing.T) { "nsa": "nsb", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "app": "production", @@ -74,7 +78,9 @@ func TestGeneratePodData(t *testing.T) { }, "kubernetes": mapstr.M{ "namespace": "testns", "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{"app": "production"}, "pod": mapstr.M{ @@ -119,7 +125,9 @@ func TestGenerateContainerPodData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "app": "production", @@ -175,7 +183,9 @@ func TestGenerateContainerPodData(t *testing.T) { "app": "production", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, } @@ -191,7 +201,11 @@ func TestGenerateContainerPodData(t *testing.T) { }, "kubernetes": mapstr.M{ "namespace": "testns", "annotations": mapstr.M{"app": "production"}, - "labels": mapstr.M{"foo": "bar"}, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, "pod": mapstr.M{ "ip": "127.0.0.5", "name": "testpod", @@ -232,7 +246,9 @@ func TestEphemeralContainers(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "app": "production", @@ -274,7 +290,9 @@ func TestEphemeralContainers(t *testing.T) { "ip": pod.Status.PodIP, }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "container": mapstr.M{ "id": "asdfghdeadbeef", @@ -300,8 +318,12 @@ func TestEphemeralContainers(t *testing.T) { "name": "devcluster", "url": "8.8.8.8:9090"}, }, "kubernetes": mapstr.M{ - "namespace": "testns", - "labels": mapstr.M{"foo": "bar"}, + "namespace": "testns", + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, "annotations": mapstr.M{"app": "production"}, "pod": mapstr.M{ "ip": "127.0.0.5", @@ -383,7 +405,9 @@ func (p *podMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOpt "ip": k8sPod.Status.PodIP, }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "app": "production", diff --git a/internal/pkg/composable/providers/kubernetes/service_test.go b/internal/pkg/composable/providers/kubernetes/service_test.go index 47d420fb233..69e945ee1cd 100644 --- a/internal/pkg/composable/providers/kubernetes/service_test.go +++ b/internal/pkg/composable/providers/kubernetes/service_test.go @@ -25,7 +25,9 @@ func TestGenerateServiceData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "baz": "ban", @@ -64,7 +66,9 @@ func TestGenerateServiceData(t *testing.T) { "baz": "ban", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, } @@ -80,7 +84,9 @@ func TestGenerateServiceData(t 
*testing.T) { "ip": "1.2.3.4", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "baz": "ban", @@ -139,7 +145,9 @@ func (s *svcMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOpt "ip": "1.2.3.4", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "baz": "ban", diff --git a/internal/pkg/eql/Eql.g4 b/internal/pkg/eql/Eql.g4 index d46e2571812..bb7b5a88770 100644 --- a/internal/pkg/eql/Eql.g4 +++ b/internal/pkg/eql/Eql.g4 @@ -22,7 +22,7 @@ NUMBER: [\-]? [0-9]+; WHITESPACE: [ \r\n\t]+ -> skip; NOT: 'NOT' | 'not'; NAME: [a-zA-Z_] [a-zA-Z0-9_]*; -VNAME: [a-zA-Z0-9_.]+('.'[a-zA-Z0-9_]+)*; +VNAME: [a-zA-Z0-9_.\-/]+('.'[a-zA-Z0-9_\-/]+)*; STEXT: '\'' ~[\r\n']* '\''; DTEXT: '"' ~[\r\n"]* '"'; LPAR: '('; diff --git a/internal/pkg/eql/eql_test.go b/internal/pkg/eql/eql_test.go index eab34f69026..54f7741f88d 100644 --- a/internal/pkg/eql/eql_test.go +++ b/internal/pkg/eql/eql_test.go @@ -42,6 +42,9 @@ func TestEql(t *testing.T) { {expression: "${env.MISSING|host.MISSING|true} == true", result: true}, {expression: "${env.MISSING|host.MISSING|false} == false", result: true}, {expression: "${'constant'} == 'constant'", result: true}, + {expression: "${data.with-dash} == 'dash-value'", result: true}, + {expression: "${'dash-value'} == 'dash-value'", result: true}, + {expression: "${data.with/slash} == 'some/path'", result: true}, // boolean {expression: "true", result: true}, @@ -306,9 +309,11 @@ func TestEql(t *testing.T) { store := &testVarStore{ vars: map[string]interface{}{ - "env.HOSTNAME": "my-hostname", - "host.name": "host-name", - "data.array": []interface{}{"array1", "array2", "array3"}, + "env.HOSTNAME": "my-hostname", + "host.name": "host-name", + "data.array": []interface{}{"array1", "array2", "array3"}, + "data.with-dash": "dash-value", + "data.with/slash": "some/path", "data.dict": map[string]interface{}{ "key1": "dict1", "key2": "dict2", @@ -327,7 +332,7 @@ func TestEql(t *testing.T) { } t.Run(title, func(t *testing.T) { if showDebug == "1" { - debug(test.expression) + debug(t, test.expression) } r, err := Eval(test.expression, store) @@ -343,17 +348,17 @@ func TestEql(t *testing.T) { } } -func debug(expression string) { +func debug(t *testing.T, expression string) { raw := antlr.NewInputStream(expression) lexer := parser.NewEqlLexer(raw) for { - t := lexer.NextToken() - if t.GetTokenType() == antlr.TokenEOF { + token := lexer.NextToken() + if token.GetTokenType() == antlr.TokenEOF { break } - fmt.Printf("%s (%q)\n", - lexer.SymbolicNames[t.GetTokenType()], t.GetText()) + t.Logf("%s (%q)\n", + lexer.SymbolicNames[token.GetTokenType()], token.GetText()) } } diff --git a/internal/pkg/eql/parser/EqlLexer.interp b/internal/pkg/eql/parser/EqlLexer.interp index 2131aba8177..66413a00c42 100644 --- a/internal/pkg/eql/parser/EqlLexer.interp +++ b/internal/pkg/eql/parser/EqlLexer.interp @@ -113,4 +113,4 @@ mode names: DEFAULT_MODE atn: -[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 230, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 
31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 5, 16, 108, 10, 16, 3, 17, 3, 17, 3, 17, 3, 17, 5, 17, 114, 10, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 5, 18, 124, 10, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 5, 19, 136, 10, 19, 3, 20, 5, 20, 139, 10, 20, 3, 20, 6, 20, 142, 10, 20, 13, 20, 14, 20, 143, 3, 20, 3, 20, 6, 20, 148, 10, 20, 13, 20, 14, 20, 149, 3, 21, 5, 21, 153, 10, 21, 3, 21, 6, 21, 156, 10, 21, 13, 21, 14, 21, 157, 3, 22, 6, 22, 161, 10, 22, 13, 22, 14, 22, 162, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 5, 23, 173, 10, 23, 3, 24, 3, 24, 7, 24, 177, 10, 24, 12, 24, 14, 24, 180, 11, 24, 3, 25, 6, 25, 183, 10, 25, 13, 25, 14, 25, 184, 3, 25, 3, 25, 6, 25, 189, 10, 25, 13, 25, 14, 25, 190, 7, 25, 193, 10, 25, 12, 25, 14, 25, 196, 11, 25, 3, 26, 3, 26, 7, 26, 200, 10, 26, 12, 26, 14, 26, 203, 11, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 209, 10, 27, 12, 27, 14, 27, 212, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3, 32, 3, 32, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 10, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 48, 48, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 
89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 6, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 8, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 9, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, 
152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2] \ No newline at end of file +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 230, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 5, 16, 108, 10, 16, 3, 17, 3, 17, 3, 17, 3, 17, 5, 17, 114, 10, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 5, 18, 124, 10, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 5, 19, 136, 10, 19, 3, 20, 5, 20, 139, 10, 20, 3, 20, 6, 20, 142, 10, 20, 13, 20, 14, 20, 143, 3, 20, 3, 20, 6, 20, 148, 10, 20, 13, 20, 14, 20, 149, 3, 21, 5, 21, 153, 10, 21, 3, 21, 6, 21, 156, 10, 21, 13, 21, 14, 21, 157, 3, 22, 6, 22, 161, 10, 22, 13, 22, 14, 22, 162, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 5, 23, 173, 10, 23, 3, 24, 3, 24, 7, 24, 177, 10, 24, 12, 24, 14, 24, 180, 11, 24, 3, 25, 6, 25, 183, 10, 25, 13, 25, 14, 25, 184, 3, 25, 3, 25, 6, 25, 189, 10, 25, 13, 25, 14, 25, 190, 7, 25, 193, 10, 25, 12, 25, 14, 25, 196, 11, 25, 3, 26, 3, 26, 7, 26, 200, 10, 26, 12, 26, 14, 26, 203, 11, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 209, 10, 27, 12, 27, 14, 27, 212, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3, 32, 3, 32, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 11, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 6, 2, 47, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 47, 49, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 
2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 
2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2] \ No newline at end of file diff --git a/internal/pkg/eql/parser/eql_lexer.go b/internal/pkg/eql/parser/eql_lexer.go index da1bf4d112e..b8eb1eeed6d 100644 --- a/internal/pkg/eql/parser/eql_lexer.go +++ b/internal/pkg/eql/parser/eql_lexer.go @@ -46,84 +46,85 @@ var serializedLexerAtn = []uint16{ 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, - 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 10, 3, 2, 47, + 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 11, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, - 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 48, 48, 50, 59, 67, 92, - 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, - 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, - 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, - 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, - 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, - 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, - 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, - 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, - 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, - 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, - 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, - 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, - 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, - 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, - 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, - 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, - 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, - 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, - 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, - 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, - 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, - 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, - 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, - 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, - 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, - 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, - 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, - 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 
100, 30, - 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, - 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, - 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, - 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, - 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, - 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, - 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, - 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, - 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, - 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, - 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, - 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, - 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, - 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, - 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, - 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, - 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, - 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, - 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, - 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, - 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, - 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, - 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, - 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, - 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, - 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, - 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, - 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, - 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, - 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, - 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, - 189, 9, 6, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, - 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, - 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, - 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, - 200, 10, 8, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, - 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, - 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, - 207, 209, 10, 9, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, - 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, - 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, - 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, - 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, - 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, - 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, - 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, - 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2, + 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 6, 2, 47, 59, 67, 92, 97, 
97, + 99, 124, 7, 2, 47, 47, 49, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, + 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, + 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, + 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, + 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, + 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, + 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, + 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, + 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, + 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, + 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, + 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, + 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, + 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, + 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, + 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, + 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, + 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, + 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, + 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, + 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, + 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, + 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, + 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, + 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, + 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, + 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, + 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, + 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, + 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, + 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, + 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, + 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, + 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, + 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, + 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, + 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, + 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, + 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, + 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, + 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, + 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, + 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, + 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, + 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, + 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, + 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, + 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 
2, 154, 156, 9, 3, 2, 2, 155, + 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, + 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, + 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, + 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, + 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, + 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, + 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, + 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, + 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, + 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, + 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, + 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, 189, + 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, + 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, + 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, + 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, 2, 200, + 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, + 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, + 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, 2, 2, + 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, + 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, + 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, + 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, + 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, + 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, + 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, + 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, + 3, 8, 2, 2, } var lexerDeserializer = antlr.NewATNDeserializer(nil) diff --git a/internal/pkg/fileutil/fileutil.go b/internal/pkg/fileutil/fileutil.go new file mode 100644 index 00000000000..86d1db249aa --- /dev/null +++ b/internal/pkg/fileutil/fileutil.go @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fileutil + +import ( + "errors" + "io/fs" + "os" + "time" +) + +// FileExists returns true if file/dir exists +func FileExists(fp string) (bool, error) { + _, err := os.Stat(fp) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return false, nil + } + return false, err + } + return true, nil +} + +// GetModTime returns file modification time +func GetModTime(fp string) (time.Time, error) { + fi, err := os.Stat(fp) + if err != nil { + return time.Time{}, err + } + return fi.ModTime(), nil +} + +// GetModTimeExists returns file modification time and existence status +// Returns no error if the file doesn't exist +func GetModTimeExists(fp string) (time.Time, bool, error) { + modTime, err := GetModTime(fp) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return modTime, false, nil + } + return modTime, false, err + } + return modTime, true, nil +} diff --git a/magefile.go b/magefile.go index 9ddfe7bb7fb..ead48b5ee42 100644 --- a/magefile.go +++ b/magefile.go @@ -822,6 +822,7 @@ func packageAgent(requiredPackages []string, packagingFn func()) { mg.Deps(CrossBuild, CrossBuildGoDaemon) mg.SerialDeps(devtools.Package, TestPackages) } + func copyComponentSpecs(componentName, versionedDropPath string) (string, error) { sourceSpecFile := filepath.Join("specs", componentName+specSuffix) targetPath := filepath.Join(versionedDropPath, componentName+specSuffix) @@ -1011,3 +1012,111 @@ type checksumFile struct { Name string `yaml:"name"` Checksum string `yaml:"sha512"` } + +// Ironbank packages elastic-agent for the IronBank distribution, relying on the +// binaries having already been built. +// +// Use SNAPSHOT=true to build snapshots. +func Ironbank() error { + if runtime.GOARCH != "amd64" { + fmt.Printf(">> IronBank images are only supported for amd64 arch (%s is not supported)\n", runtime.GOARCH) + return nil + } + if err := prepareIronbankBuild(); err != nil { + return errors.Wrap(err, "failed to prepare the IronBank context") + } + if err := saveIronbank(); err != nil { + return errors.Wrap(err, "failed to save artifacts for IronBank") + } + return nil +} + +func saveIronbank() error { + fmt.Println(">> saveIronbank: save the IronBank container context.") + + ironbank := getIronbankContextName() + buildDir := filepath.Join("build", ironbank) + if _, err := os.Stat(buildDir); os.IsNotExist(err) { + return fmt.Errorf("cannot find the folder with the ironbank context: %+v", err) + } + + distributionsDir := "build/distributions" + if _, err := os.Stat(distributionsDir); os.IsNotExist(err) { + err := os.MkdirAll(distributionsDir, 0750) + if err != nil { + return fmt.Errorf("cannot create folder for docker artifacts: %+v", err) + } + } + + // change dir to the buildDir location where the ironbank folder exists + // so the tar.gz is generated without the extra nested folders. + wd, _ := os.Getwd() + os.Chdir(buildDir) + defer os.Chdir(wd) + + // write the tar.gz two directories up, since + // buildDir is nested two folders deep.
+ tarGzFile := filepath.Join("..", "..", distributionsDir, ironbank+".tar.gz") + + // Save the build context as tar.gz artifact + err := devtools.Tar("./", tarGzFile) + if err != nil { + return fmt.Errorf("cannot compress the tar.gz file: %+v", err) + } + + return errors.Wrap(devtools.CreateSHA512File(tarGzFile), "failed to create .sha512 file") +} + +func getIronbankContextName() string { + version, _ := devtools.BeatQualifiedVersion() + defaultBinaryName := "{{.Name}}-ironbank-{{.Version}}{{if .Snapshot}}-SNAPSHOT{{end}}" + outputDir, _ := devtools.Expand(defaultBinaryName+"-docker-build-context", map[string]interface{}{ + "Name": "elastic-agent", + "Version": version, + }) + return outputDir +} + +func prepareIronbankBuild() error { + fmt.Println(">> prepareIronbankBuild: prepare the IronBank container context.") + buildDir := filepath.Join("build", getIronbankContextName()) + templatesDir := filepath.Join("dev-tools", "packaging", "templates", "ironbank") + + data := map[string]interface{}{ + "MajorMinor": majorMinor(), + } + + err := filepath.Walk(templatesDir, func(path string, info os.FileInfo, _ error) error { + if !info.IsDir() { + target := strings.TrimSuffix( + filepath.Join(buildDir, filepath.Base(path)), + ".tmpl", + ) + + err := devtools.ExpandFile(path, target, data) + if err != nil { + return errors.Wrapf(err, "expanding template '%s' to '%s'", path, target) + } + } + return nil + }) + + if err != nil { + return fmt.Errorf("cannot create templates for the IronBank: %+v", err) + } + + // copy files + sourcePath := filepath.Join("dev-tools", "packaging", "files", "ironbank") + if err := devtools.Copy(sourcePath, buildDir); err != nil { + return fmt.Errorf("cannot create files for the IronBank: %+v", err) + } + return nil +} + +func majorMinor() string { + if v, _ := devtools.BeatQualifiedVersion(); v != "" { + parts := strings.SplitN(v, ".", 3) + return parts[0] + "." + parts[1] + } + return "" +} diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 9a64bdc5d95..b8b6792b912 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-7e67f5d9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-d058e92f-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-7e67f5d9-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-d058e92f-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing"
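Editor's note: below is a minimal sketch of how the read-only vault option and the new fileutil helpers introduced in this change could be used together. It is illustrative only: the package name, the readSecrets function, the "fleet.enc" file name, and the error handling are assumptions, the import paths are inferred from the repository layout (both packages are internal to the elastic-agent module), and Close is the method referenced in the vault_darwin.go doc comment.

package example // hypothetical caller inside the elastic-agent module

import (
	"fmt"

	"github.com/elastic/elastic-agent/internal/pkg/agent/vault"
	"github.com/elastic/elastic-agent/internal/pkg/fileutil"
)

func readSecrets() error {
	// WithReadonly(true) skips MkdirAll and seed creation; New is expected to
	// fail with an error satisfying errors.Is(err, fs.ErrNotExist) when the
	// vault has not been provisioned yet.
	v, err := vault.New("vault", vault.WithReadonly(true))
	if err != nil {
		return fmt.Errorf("vault not available: %w", err)
	}
	defer v.Close()

	// GetModTimeExists reports existence without turning "not found" into an error.
	modTime, exists, err := fileutil.GetModTimeExists("fleet.enc")
	if err != nil {
		return err
	}
	if exists {
		fmt.Println("fleet.enc last modified:", modTime)
	}
	return nil
}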